Merge branch 'harvester:master' into feat/airgap-harvester-airgap-rancher-install-test
irishgordo authored Feb 2, 2024
2 parents 22af746 + 5e98567 commit bc2a295
Showing 58 changed files with 1,161 additions and 590 deletions.
102 changes: 102 additions & 0 deletions .github/workflows/vagrant-install.yaml
@@ -0,0 +1,102 @@
name: Vagrant install

on:
  pull_request:
    types: [opened, reopened, synchronize]
  push:
    branches:
      - master
      - 'v**'
  schedule:
    - cron: '30 19 * * *'

env:
  LIBVIRT_DEFAULT_URI: "qemu:///system"

jobs:
  main:
    name: Build and deploy
    runs-on:
      - self-hosted
      - Linux
      - kvm
      - vagrant
      - equinix
    steps:
      - uses: actions/checkout@v3
      - name: Build Harvester artifacts
        run: |
          make
      - name: Clone and checkout ipxe-examples
        id: ipxe
        run: |
          cd $HOME
          if [ ! -d ipxe-examples ]; then
            git clone https://github.com/harvester/ipxe-examples.git
          fi
          cd ipxe-examples
          git reset && git checkout .
          git clean -fd
          git pull
          echo "VAGRANT_HOME=$HOME/ipxe-examples/vagrant-pxe-harvester" >> $GITHUB_OUTPUT
      - name: Clean up previous vagrant deployment
        working-directory: ${{ steps.ipxe.outputs.VAGRANT_HOME }}
        run: |
          vagrant destroy -f
      - name: Remove OVMF.fd line if needed
        working-directory: ${{ steps.ipxe.outputs.VAGRANT_HOME }}
        run: |
          if [ ! -f /usr/share/qemu/OVMF.fd ]; then
            echo "Remove libvirt loader: can't find UEFI firmware"
            sed 's/libvirt.loader.*/#libvirt.loader = /' Vagrantfile
          fi
      - name: Generate SSH keys
        run: |
          ssh-keygen -t rsa -q -N "" -f ./ci/terraform/tmp-ssh-key
      - name: Set SSH key in ipxe-examples settings
        run: |
          export PUB_KEY=$(cat ./ci/terraform/tmp-ssh-key.pub)
          yq e -i ".harvester_config.ssh_authorized_keys += [ strenv(PUB_KEY) ]" ${{ steps.ipxe.outputs.VAGRANT_HOME }}/settings.yml
      - name: Set artifacts in ipxe-examples settings
        run: |
          yq e -i ".harvester_iso_url = \"file://${{ github.workspace }}/dist/artifacts/harvester-master-amd64.iso\"" ${{ steps.ipxe.outputs.VAGRANT_HOME }}/settings.yml
          yq e -i ".harvester_kernel_url = \"file://${{ github.workspace }}/dist/artifacts/harvester-master-vmlinuz-amd64\"" ${{ steps.ipxe.outputs.VAGRANT_HOME }}/settings.yml
          yq e -i ".harvester_ramdisk_url = \"file://${{ github.workspace }}/dist/artifacts/harvester-master-initrd-amd64\"" ${{ steps.ipxe.outputs.VAGRANT_HOME }}/settings.yml
          yq e -i ".harvester_rootfs_url = \"file://${{ github.workspace }}/dist/artifacts/harvester-master-rootfs-amd64.squashfs\"" ${{ steps.ipxe.outputs.VAGRANT_HOME }}/settings.yml
      - name: Setup cluster
        working-directory: ${{ steps.ipxe.outputs.VAGRANT_HOME }}
        run: |
          ./setup_harvester.sh
      - name: Enable soft emulation
        working-directory: ./ci/terraform
        run: |
          ./enable_soft_emulation.sh ${{ steps.ipxe.outputs.VAGRANT_HOME }}/settings.yml
      - name: Clean the previous temp files
        working-directory: ./ci/terraform
        run: |
          ./cleanup_test_files.sh
      - name: Testing existing files
        working-directory: ./ci/terraform
        run: |
          ./check_files.sh ${{ steps.ipxe.outputs.VAGRANT_HOME }}/settings.yml
      - name: Testing services status
        working-directory: ./ci/terraform
        run: |
          ./check_services_status.sh ${{ steps.ipxe.outputs.VAGRANT_HOME }}/settings.yml
      - name: Testing basic operations with terraform
        working-directory: ./ci/terraform
        run: |
          curl https://releases.hashicorp.com/terraform/1.3.7/terraform_1.3.7_linux_amd64.zip -o terraform_bin.zip
          unzip -o terraform_bin.zip
          ./get_kubeconfig.sh ${{ steps.ipxe.outputs.VAGRANT_HOME }}/settings.yml
          ./terraform init -no-color
          ./terraform apply -auto-approve -no-color
      - name: Test network on the VMs
        working-directory: ./ci/terraform
        run: |
          ./test_terraform_vm.sh ${{ steps.ipxe.outputs.VAGRANT_HOME }}/settings.yml
      - name: Clean up vagrant cluster
        working-directory: ${{ steps.ipxe.outputs.VAGRANT_HOME }}
        run: |
          vagrant destroy -f
1 change: 1 addition & 0 deletions .gitignore
@@ -4,5 +4,6 @@
/dist
*.swp
.idea
.vagrant
/package/harvester-os/iso/boot/grub2/harvester.cfg
/package/harvester-os/harvester-release.yaml
16 changes: 8 additions & 8 deletions Dockerfile.dapper
@@ -1,6 +1,5 @@
FROM quay.io/costoolkit/releases-green:luet-toolchain-0.21.2 as luet
FROM quay.io/costoolkit/elemental-cli:v0.2.5 as elemental

FROM quay.io/costoolkit/releases-teal:grub2-live-0.0.4-2 as grub2-mbr
FROM quay.io/costoolkit/releases-teal:grub2-efi-image-live-0.0.4-2 as grub2-efi
FROM registry.suse.com/bci/golang:1.20

ARG http_proxy=$http_proxy
@@ -21,24 +20,25 @@ RUN zypper -n rm container-suseconnect && \
zypper -n install git curl docker gzip tar wget zstd squashfs xorriso awk jq mtools dosfstools unzip rsync
RUN curl -sfL https://github.com/mikefarah/yq/releases/download/v4.21.1/yq_linux_${ARCH} -o /usr/bin/yq && chmod +x /usr/bin/yq
RUN if [ "${ARCH}" == "amd64" ]; then \
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.49.0; \
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.55.2; \
fi

RUN zypper addrepo http://download.opensuse.org/distribution/leap/15.4/repo/oss/ oss && \
zypper --gpg-auto-import-keys refresh && \
zypper in -y qemu-x86 qemu-tools

RUN mkdir /grub2-mbr
COPY --from=grub2-mbr / /grub2-mbr
RUN mkdir /grub2-efi
COPY --from=grub2-efi / /grub2-efi

# set up helm
ENV HELM_VERSION v3.3.1
ENV HELM_URL=https://get.helm.sh/helm-${HELM_VERSION}-linux-${ARCH}.tar.gz
RUN mkdir /usr/tmp && \
curl ${HELM_URL} | tar xvzf - --strip-components=1 -C /usr/tmp/ && \
mv /usr/tmp/helm /usr/bin/helm

# luet & elemental
COPY --from=luet /usr/bin/luet /usr/bin/luet
COPY --from=elemental /usr/bin/elemental /usr/bin/elemental

# You could define your own rke2 url by setting `RKE2_IMAGE_REPO`
ENV DAPPER_ENV REPO TAG DRONE_TAG DRONE_BRANCH CROSS RKE2_IMAGE_REPO USE_LOCAL_IMAGES BUILD_QCOW DRONE_BUILD_EVENT
ENV DAPPER_SOURCE /go/src/github.com/harvester/harvester-installer/
88 changes: 85 additions & 3 deletions README.md
@@ -2,16 +2,98 @@ harvester-installer
========
[![Build Status](https://drone-publish.rancher.io/api/badges/harvester/harvester-installer/status.svg)](https://drone-publish.rancher.io/harvester/harvester-installer)

Repo for building the [harvester](https://github.com/harvester/harvester) ISO image.
Repo for building the [Harvester](https://github.com/harvester/harvester)
ISO image. This includes the various scripts necessary to build the ISO
itself, plus the `harvester-installer` binary and related scripts that
perform system installation when the ISO is booted.

## Building

To build an ISO image, run:

`make`

Built ISO image is located in the `dist/artifacts` directory.
This will:

1. Build the `harvester-installer` binary.
2. Create an archive of all the necessary Harvester and Rancher charts
and container images.
3. Create the `harvester-cluster-repo` container image, which provides
a helm repository including the charts from the previous step.
4. Package everything from the above steps into an ISO image. The ISO
image is built using [Elemental Toolkit](https://github.com/rancher/elemental-toolkit/),
and is based on [harvester/os2](https://github.com/harvester/os2),
which in turn is based on [SLE Micro](https://www.suse.com/products/micro/).

The built ISO image is written to the `dist/artifacts` directory.
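
As a rough sanity check after a build, something like the following can
be expected for a `master`-branch build (a sketch; the artifact names
follow the pattern used by the Vagrant CI workflow in this repo and
change with the branch or tag being built):

```sh
# Build the installer binary, the chart/image bundles, and the ISO
make

# Artifacts land in dist/artifacts; names below assume a master build
ls dist/artifacts/
# harvester-master-amd64.iso
# harvester-master-vmlinuz-amd64
# harvester-master-initrd-amd64
# harvester-master-rootfs-amd64.squashfs
```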

## Harvester Installation Process

Harvester can be installed either by [booting the Harvester ISO](https://docs.harvesterhci.io/v1.2/install/index/)
or via [PXE Boot](https://docs.harvesterhci.io/v1.2/install/pxe-boot-install).
When booting via ISO, `harvester-installer` runs interactively on the
system console to allow you to configure the system. When booting via
PXE, you don't get the interactive installer; instead, you need to
provide YAML files specifying the configuration to apply.

In both cases (ISO boot and PXE boot), the `harvester-installer` binary
still _runs_ in order to provision the system. This is put in place by
[system/oem/91_installer.yaml](https://github.com/harvester/harvester-installer/blob/master/package/harvester-os/files/system/oem/91_installer.yaml)
which in turn calls [setup-installer.sh](https://github.com/harvester/harvester-installer/blob/master/package/harvester-os/files/usr/bin/setup-installer.sh)
to start the installer on tty1.

When booted via ISO, the installer will prompt for configuration
information (create a new cluster / join an existing cluster, what
disks to use, network config, etc.). When booted via PXE, the kernel
command line parameter `harvester.install.automatic=true` causes the
interactive part to be skipped, and config will be retrieved from the
URL specified by `harvester.install.config_url`.

The installer will run some preflight checks to ensure the system
meets minimum hardware requirements. If any of these checks
fail when run interactively, the first page of the installer will
indicate which checks failed, and give you the option to proceed or
not. When installing via PXE, if any checks fail, installation will
abort and the failed checks will be visible on the system console,
and also logged to `/var/log/console.log` in the installation environment.
If you wish to bypass the preflight checks for testing purposes during
automated installation, set the `harvester.install.skipchecks=true`
kernel command line parameter.
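
As a rough illustration (the variable name and config URL below are
placeholders, not anything the installer itself reads), an automated
PXE install that also bypasses the preflight checks would add kernel
arguments along these lines:

```sh
# Illustrative only: extra kernel arguments for an automated PXE install.
# The config URL is a placeholder for your own HTTP server, and
# skipchecks=true should only be used for testing.
HARVESTER_KERNEL_ARGS="harvester.install.automatic=true \
harvester.install.config_url=http://example.local/harvester-config.yaml \
harvester.install.skipchecks=true"
```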

Either way (ISO or PXE), the installer writes the final config out to
a temporary file, which is passed to [harv-install](https://github.com/harvester/harvester-installer/blob/master/package/harvester-os/files/usr/sbin/harv-install),
which in turn calls `elemental install` to provision the system.
The `harv-install` script also preloads all the container images.
Finally, the system is rebooted.

On the newly installed system, `harvester-installer` remains active
on the console in order to show the cluster management URL along with
the current node's hostname and IP address.

## Hacking the Interactive Part of `harvester-installer`

Ordinarily `harvester-installer` needs to be run from a booted ISO
so it can actually install a system. But if you're only working
on changes to the interactive part of the installer (e.g. adding
or changing fields, or altering the workflow) and don't actually
need to perform a final installation, the binary can be quickly
tested using Vagrant with vagrant-libvirt. Here's how:

```sh
$ USE_LOCAL_IMAGES=true make build
$ vagrant up
$ vagrant ssh
> sudo DEBUG=true TTY=/dev/tty /vagrant/harvester-installer
```

Be sure the terminal window you use is fairly tall, or the installer
will bail out with "panic: invalid dimensions" after you get past the
networking screen. To break out of the installer, hit CTRL-C. If you
rebuild the binary, it can be synced back to a running vagrant box
with `vagrant rsync`, as shown in the sketch below.
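
A typical edit-test loop then looks roughly like this (assuming the
vagrant box from the steps above is still running):

```sh
# Rebuild the installer binary after making changes
USE_LOCAL_IMAGES=true make build

# Push the fresh binary into the running vagrant box...
vagrant rsync

# ...then, inside `vagrant ssh`, start it again:
# sudo DEBUG=true TTY=/dev/tty /vagrant/harvester-installer
```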

## License
Copyright (c) 2022 [Rancher Labs, Inc.](http://rancher.com)
Copyright (c) 2024 [Rancher Labs, Inc.](http://rancher.com)

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
95 changes: 95 additions & 0 deletions Vagrantfile
@@ -0,0 +1,95 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure("2") do |config|
# The most common configuration options are documented and commented below.
# For a complete reference, please see the online documentation at
# https://docs.vagrantup.com.

# Every Vagrant development environment requires a box. You can search for
# boxes at https://vagrantcloud.com/search.
config.vm.box = "opensuse/Leap-15.4.x86_64"

# Disable automatic box update checking. If you disable this, then
# boxes will only be checked for updates when the user runs
# `vagrant box outdated`. This is not recommended.
# config.vm.box_check_update = false

# Create a forwarded port mapping which allows access to a specific port
# within the machine from a port on the host machine. In the example below,
# accessing "localhost:8080" will access port 80 on the guest machine.
# NOTE: This will enable public access to the opened port
# config.vm.network "forwarded_port", guest: 80, host: 8080

# Create a forwarded port mapping which allows access to a specific port
# within the machine from a port on the host machine and only allow access
# via 127.0.0.1 to disable public access
# config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1"

# Create a private network, which allows host-only access to the machine
# using a specific IP.
# config.vm.network "private_network", ip: "192.168.33.10"

# Create a public network, which generally matches a bridged network.
# Bridged networks make the machine appear as another physical device on
# your network.
# config.vm.network "public_network"

# Share an additional folder to the guest VM. The first argument is
# the path on the host to the actual folder. The second argument is
# the path on the guest to mount the folder. And the optional third
# argument is a set of non-required options.
# config.vm.synced_folder "../data", "/vagrant_data"

# Disable the default share of the current code directory. Doing this
# provides improved isolation between the vagrant box and your host
# by making sure your Vagrantfile isn't accessible to the vagrant box.
# If you use this you may want to enable additional shared subfolders as
# shown above.
# config.vm.synced_folder ".", "/vagrant", disabled: true
config.vm.synced_folder "bin", "/vagrant"

# Provider-specific configuration so you can fine-tune various
# backing providers for Vagrant. These expose provider-specific options.
# Example for VirtualBox:
#
# config.vm.provider "virtualbox" do |vb|
# # Display the VirtualBox GUI when booting the machine
# vb.gui = true
#
# # Customize the amount of memory on the VM:
# vb.memory = "1024"
# end
#
# View the documentation for the provider you are using for more
# information on available options.

config.vm.provider :libvirt do |libvirt|
libvirt.storage :file, :size => '500G'
end

# Enable provisioning with a shell script. Additional provisioners such as
# Ansible, Chef, Docker, Puppet and Salt are also available. Please see the
# documentation for more information about their specific syntax and use.
# config.vm.provision "shell", inline: <<-SHELL
# apt-get update
# apt-get install -y apache2
# SHELL
config.vm.provision "shell", inline: <<-SHELL
zypper ar --no-gpgcheck https://download.opensuse.org/repositories/home:/vcheng:/Packages/15.4/home:vcheng:Packages.repo
zypper --gpg-auto-import-keys refresh
zypper --non-interactive in yip
echo -e '#!/bin/sh\necho "fake $0"' > /usr/local/bin/fake
chmod a+x /usr/local/bin/fake
for f in /usr/sbin/harv-install /usr/sbin/cos-installer-shutdown ; do
ln -s /usr/local/bin/fake $f
done
echo
echo 'Run `vagrant ssh` then `sudo DEBUG=true TTY=/dev/tty /vagrant/harvester-installer` to test'
echo
SHELL
end