diff --git a/README.md b/README.md index 23942dd..b66003d 100644 --- a/README.md +++ b/README.md @@ -85,10 +85,9 @@ The following table provides a sample cost breakdown for deploying this Guidance * An [AWS Identity and Access Management](http://aws.amazon.com/iam) (IAM) user with administrator access * [Configured AWS credentials](https://docs.aws.amazon.com/cdk/latest/guide/getting_started.html#getting_started_prerequisites) * [Docker](https://docs.docker.com/get-docker/), [Node.js](https://nodejs.org/en/download/) - , [Python 3.9](https://www.python.org/downloads/release/python-3916), [pip](https://pip.pypa.io/en/stable/installing/), + , [>=Python 3.11](https://www.python.org/downloads/release/python-3110/), [pip](https://pip.pypa.io/en/stable/installing/), and [jq](https://stedolan.github.io/jq/) installed on the workstation that you plan to deploy the guidance from. -Note that the guidance is **only** compatible with Python 3.9. ### Deploy with AWS CDK @@ -177,6 +176,27 @@ cdk deploy prodNitroSigner -O output.json Follow all subsequent steps from the dev deployment pointed out above. +## Troubleshooting + +**Docker Image Push/Pull Error** +* On `building` instance during `cdk deploy` step: +```shell +devNitroWalletEth: fail: docker push 012345678910.dkr.ecr.us-east-1.amazonaws.com/cdk-hnb659fds-container-assets-012345678910-us-east-1:ab3fe... 
exited with error code 1: failed commit on ref "manifest-sha256:7141...": unexpected status from PUT request to https://012345678910.dkr.ecr.us-east-1.amazonaws.com/v2/cdk-hnb659fds-container-assets-012345678910-us-east-1/manifests/ab3fe...: 400 Bad Request +Failed to publish asset ab3fe...:012345678910-us-east-1 +``` + +* On EC2 instance pulling docker container +```shell +ab3fe...: Pulling from cdk-hnb659fds-container-assets-012345678910-us-east-1 +unsupported media type application/vnd.in-toto+json +``` + +**Solution** +* Issue might be related to building and publishing docker containers from `arm` based instances such as Apple Silicon, requiring docker `buildx` [issue](https://github.com/aws/aws-cdk/issues/30258) +* Cleanup images from local docker repository (`docker rmi ...`) and from Amazon Elastic Container Registry (ECR) e.g. via AWS console +* Set environment variable in terminal session (`export BUILDX_NO_DEFAULT_ATTESTATIONS=1`) or specify it during cdk deployment (`BUILDX_NO_DEFAULT_ATTESTATIONS=1 cdk deploy`) + + ## Security See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information. 
diff --git a/application/eth2/lambda/layer/requirements.txt b/application/eth2/lambda/layer/requirements.txt index c636a8e..553c57d 100644 --- a/application/eth2/lambda/layer/requirements.txt +++ b/application/eth2/lambda/layer/requirements.txt @@ -1,3 +1,3 @@ -cryptography==39.0.2 ; python_version >= "3.9" and python_version < "4" -requests>=2.28.1 ; python_version >= "3.9" and python_version < "4" -urllib3<2; python_version >= "3.9" and python_version < "4" \ No newline at end of file +cryptography==41.0.7 +requests>=2.31.0 +urllib3==2.1.0 diff --git a/application/eth2/watchdog/watchdog.py b/application/eth2/watchdog/watchdog.py index ccf8c25..7f2582b 100644 --- a/application/eth2/watchdog/watchdog.py +++ b/application/eth2/watchdog/watchdog.py @@ -28,29 +28,51 @@ _logger.addHandler(handler) -def get_aws_session_token() -> dict: +def get_imds_token(): http_ec2_client = client.HTTPConnection("169.254.169.254") - http_ec2_client.request("GET", "/latest/meta-data/iam/security-credentials/") - r = http_ec2_client.getresponse() + headers = { + "X-aws-ec2-metadata-token-ttl-seconds": "21600" # Token valid for 6 hours + } + http_ec2_client.request("PUT", "/latest/api/token", headers=headers) + token_response = http_ec2_client.getresponse() + return token_response.read().decode() - instance_profile_name = r.read().decode() - http_ec2_client = client.HTTPConnection("169.254.169.254") - http_ec2_client.request( - "GET", - f"/latest/meta-data/iam/security-credentials/{instance_profile_name}", - ) - r = http_ec2_client.getresponse() +def get_aws_session_token(): + try: + token = get_imds_token() - response = json.loads(r.read()) + http_ec2_client = client.HTTPConnection("169.254.169.254") + headers = {"X-aws-ec2-metadata-token": token} - credential = { - "access_key_id": response["AccessKeyId"], - "secret_access_key": response["SecretAccessKey"], - "token": response["Token"], - } + # Get instance profile name + http_ec2_client.request( + "GET", + 
"/latest/meta-data/iam/security-credentials/", + headers=headers + ) + r = http_ec2_client.getresponse() + instance_profile_name = r.read().decode() + + # Get credentials + http_ec2_client.request( + "GET", + f"/latest/meta-data/iam/security-credentials/{instance_profile_name}", + headers=headers + ) + r = http_ec2_client.getresponse() + response = json.loads(r.read()) + return { + "access_key_id": response["AccessKeyId"], + "secret_access_key": response["SecretAccessKey"], + "token": response["Token"], + } - return credential + except Exception as e: + raise Exception(f"Failed to retrieve instance credentials: {str(e)}") + finally: + if 'http_ec2_client' in locals(): + http_ec2_client.close() def get_cloudformation_stack_id(cf_stack_name: str) -> str: @@ -92,8 +114,8 @@ def nitro_cli_describe_call(name: str = None) -> bool: return False if ( - response[0].get("EnclaveName") != name - and response[0].get("State") != "Running" + response[0].get("EnclaveName") != name + and response[0].get("State") != "Running" ): return False @@ -230,7 +252,7 @@ def get_encrypted_tls_key(tls_keys_table_name: str, key_id=1) -> str: def init_web3signer_call( - tls_keys_table_name: str, cf_stack_name: str, validator_keys_table_name: str + tls_keys_table_name: str, cf_stack_name: str, validator_keys_table_name: str ) -> None: uuid = get_cloudformation_stack_id(cf_stack_name) encrypted_validator_keys = get_encrypted_validator_keys( @@ -238,11 +260,11 @@ def init_web3signer_call( ) encrypted_tls_key = get_encrypted_tls_key(tls_keys_table_name=tls_keys_table_name) - credential = get_aws_session_token() + credentials = get_aws_session_token() payload = { "operation": "init", - "credential": credential, + "credential": credentials, "encrypted_tls_key": encrypted_tls_key, "encrypted_validator_keys": encrypted_validator_keys, } diff --git a/nitro_wallet/nitro_wallet_stack.py b/nitro_wallet/nitro_wallet_stack.py index 81ccffe..f18cf84 100644 --- a/nitro_wallet/nitro_wallet_stack.py +++ 
b/nitro_wallet/nitro_wallet_stack.py @@ -94,6 +94,9 @@ def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: "EthereumSigningServerImage", directory="./application/{}/server".format(application_type), build_args={"REGION_ARG": self.region, "LOG_LEVEL_ARG": log_level}, + platform=ecr_assets.Platform.LINUX_AMD64, + asset_name="EthereumSigningServerImage" + ) signing_enclave_image = ecr_assets.DockerImageAsset( @@ -101,6 +104,8 @@ def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: "EthereumSigningEnclaveImage", directory="./application/{}/enclave".format(application_type), build_args={"REGION_ARG": self.region, "LOG_LEVEL_ARG": log_level}, + platform=ecr_assets.Platform.LINUX_AMD64, + asset_name="EthereumSigningEnclaveImage" ) watchdog = s3_assets.Asset( @@ -152,7 +157,7 @@ def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: nitro_instance_sg.add_ingress_rule(signer_client_sg, ec2.Port.tcp(8443)) # AMI - amzn_linux = ec2.MachineImage.latest_amazon_linux(generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2) + amzn_linux = ec2.MachineImage.latest_amazon_linux2() # Instance Role and SSM Managed Policy role = iam.Role( @@ -227,6 +232,7 @@ def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: block_devices=[block_device], role=role, security_group=nitro_instance_sg, + http_put_response_hop_limit=3 ) nitro_nlb = elbv2.NetworkLoadBalancer( @@ -292,7 +298,7 @@ def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: self, "NitroInvokeLambdaLayer", entry="application/{}/lambda/layer".format(params["application_type"]), - compatible_runtimes=[lambda_.Runtime.PYTHON_3_9], + compatible_runtimes=[lambda_.Runtime.PYTHON_3_11], ) invoke_lambda = lambda_python.PythonFunction( @@ -301,7 +307,7 @@ def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: entry="application/{}/lambda/NitroInvoke".format(params["application_type"]), handler="lambda_handler", 
index="lambda_function.py", - runtime=lambda_.Runtime.PYTHON_3_9, + runtime=lambda_.Runtime.PYTHON_3_11, timeout=Duration.minutes(2), memory_size=256, environment={ @@ -314,6 +320,7 @@ def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: vpc=vpc, vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS), security_groups=[signer_client_sg], + architecture=lambda_.Architecture.X86_64 ) encryption_key.grant_encrypt(invoke_lambda) diff --git a/requirements-dev.txt b/requirements-dev.txt index 1078a19..ef5ce5e 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,5 +1,5 @@ -pytest==6.2.5 ; python_version >= "3.9" and python_version < "3.10" -black>=22.10.0 ; python_version >= "3.9" and python_version < "3.10" -pre-commit>=2.20.0 ; python_version >= "3.9" and python_version < "3.10" -bandit>=1.7.4 ; python_version >= "3.9" and python_version < "3.10" -flake8==7.0.0 ; python_version >= "3.9" and python_version < "3.10" +pytest==7.4.4 +black>=23.12.1 +pre-commit>=3.6.0 +bandit>=1.7.6 +flake8==7.0.0 diff --git a/requirements.txt b/requirements.txt index 0ed5d8b..291cf5b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -aws-cdk-lib==2.51.1 ; python_version >= "3.9" and python_version < "3.10" -constructs>=10.0.0,<11.0.0 ; python_version >= "3.9" and python_version < "3.10" -aws-cdk.aws-lambda-python-alpha==2.51.0a0 ; python_version >= "3.9" and python_version < "3.10" -cdk-nag>=2.21.11 ; python_version >= "3.9" and python_version < "3.10" \ No newline at end of file +aws-cdk-lib==2.98.0 +constructs==10.1.271 +aws-cdk.aws-lambda-python-alpha==2.51.0a0 +cdk-nag==2.27.88 \ No newline at end of file diff --git a/scripts/generate_key_policy.sh b/scripts/generate_key_policy.sh index 23df916..1065874 100755 --- a/scripts/generate_key_policy.sh +++ b/scripts/generate_key_policy.sh @@ -3,28 +3,30 @@ # SPDX-License-Identifier: MIT-0 set -e set +x +set -o pipefail output=${1} secure_keygen_stack_name=${2} # 
instance id -stack_name=$(jq -r '. |= keys | .[0]' ${output}) -asg_name=$(jq -r '."'${stack_name}'".ASGGroupName' ${output}) -instance_id=$(./scripts/get_asg_instances.sh ${asg_name} | head -n 1) +stack_name=$(jq -r '. |= keys | .[0]' "${output}") +asg_name=$(jq -r '."'${stack_name}'".ASGGroupName' "${output}") +instance_id=$(./scripts/get_asg_instances.sh "${asg_name}" | head -n 1) # pcr_0 # 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 for debug -pcr_0=$(./scripts/get_pcr0.sh ${instance_id}) +pcr_0=$(./scripts/get_pcr0.sh "${instance_id}") # ec2 role -ec2_role_arn=$(jq -r ".${stack_name}.EC2InstanceRoleARN" ${output}) +ec2_role_arn=$(jq -r ".${stack_name}.EC2InstanceRoleARN" "${output}") # lambda role -lambda_execution_arn=$(jq -r ".${stack_name}.LambdaExecutionArn" ${output}) +lambda_execution_arn=$(jq -r ".${stack_name}.LambdaExecutionArn" "${output}") if [[ -n "${secure_keygen_stack_name}" ]]; then echo "Retrieving ValidatorKeyGenFunction Lambda Role of $secure_keygen_stack_name deployed in https://github.com/aws-samples/eth-keygen-lambda-sam" imported_lambda_execution_arn=$(aws cloudformation describe-stacks \ - --stack-name $secure_keygen_stack_name \ + --region "${CDK_DEPLOY_REGION}" \ + --stack-name "${secure_keygen_stack_name}" \ --query "Stacks[0].Outputs[?OutputKey=='ValidatorKeyGenFunctionIamRole'].OutputValue | [0]" \ --output text) fi diff --git a/scripts/get_asg_instances.sh b/scripts/get_asg_instances.sh index 793df55..16e710c 100755 --- a/scripts/get_asg_instances.sh +++ b/scripts/get_asg_instances.sh @@ -5,4 +5,7 @@ set +x set -e # avoid old terminated instances -aws autoscaling describe-auto-scaling-groups --region "${CDK_DEPLOY_REGION}" --auto-scaling-group-name "${1}" | jq -r '.AutoScalingGroups[0].Instances[] | select ( .LifecycleState | contains("InService")) | .InstanceId ' +aws autoscaling describe-auto-scaling-groups \ +--region "${CDK_DEPLOY_REGION}" \ +--auto-scaling-group-name "${1}" \ 
+| jq -r '.AutoScalingGroups[0].Instances[] | select ( .LifecycleState | contains("InService")) | .InstanceId ' diff --git a/scripts/get_pcr0.sh b/scripts/get_pcr0.sh index 3f3df7d..f6d00f1 100755 --- a/scripts/get_pcr0.sh +++ b/scripts/get_pcr0.sh @@ -4,16 +4,51 @@ set +x set -e -flag_id=$(aws ssm send-command --region "${CDK_DEPLOY_REGION}" --document-name "AWS-RunShellScript" --instance-ids "${1}" --parameters 'commands=["sudo cat /etc/environment | head -n 1 | tr \"=\" \"\n\" | tail -n 1"]' | jq -r '.Command.CommandId') -flags=$(aws ssm list-command-invocations --region "${CDK_DEPLOY_REGION}" --instance-id "${1}" --command-id "${flag_id}" --details | jq -r '.CommandInvocations[0].CommandPlugins[0].Output') +flag_id=$(aws ssm send-command \ + --region "${CDK_DEPLOY_REGION}" \ + --document-name "AWS-RunShellScript" \ + --instance-ids "${1}" \ + --parameters 'commands=["sudo cat /etc/environment | head -n 1 | tr \"=\" \"\n\" | tail -n 1"]' \ + | jq -r '.Command.CommandId') +flags=$(aws ssm list-command-invocations \ + --region "${CDK_DEPLOY_REGION}" \ + --instance-id "${1}" \ + --command-id "${flag_id}" \ + --details \ + | jq -r '.CommandInvocations[0].CommandPlugins[0].Output') + +# validate that flags value has been read correctly from ec2 instance - it should be either true or false +if [[ "${flags}" != "TRUE" && "${flags}" != "FALSE" ]]; then + echo "flags is not true or false" + exit 1 +fi + +# if debug flag is true, provide 000 string in key policy, otherwise get PCR value from eif file running on EC2 instance if [[ "${flags}" == "TRUE" ]]; then pcr_0="000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" else - command_id=$(aws ssm send-command --document-name "AWS-RunShellScript" --instance-ids "${1}" --parameters 'commands=["sudo nitro-cli describe-eif --eif-path /home/ec2-user/app/server/signing_server.eif | jq -r '.Measurements.PCR0'"]' | jq -r '.Command.CommandId') + command_id=$(aws ssm send-command \ + 
--region "${CDK_DEPLOY_REGION}" \ + --document-name "AWS-RunShellScript" \ + --instance-ids "${1}" \ + --parameters 'commands=["sudo nitro-cli describe-eif --eif-path /home/ec2-user/app/server/signing_server.eif | jq -r '.Measurements.PCR0'"]' \ + | jq -r '.Command.CommandId') + # takes about 5sec to return the pcr0 value from a non running enclave - sleep 7 - pcr_0=$(aws ssm list-command-invocations --instance-id "${1}" --command-id "${command_id}" --details | jq -r '.CommandInvocations[0].CommandPlugins[0].Output') + sleep 10 + pcr_0=$(aws ssm list-command-invocations \ + --region "${CDK_DEPLOY_REGION}" \ + --instance-id "${1}" \ + --command-id "${command_id}" \ + --details \ + | jq -r '.CommandInvocations[0].CommandPlugins[0].Output') +fi + +# ensure that pcr0 is not empty +if [[ "${pcr_0}" == "" ]]; then + echo "pcr_0 is empty" + exit 1 fi echo "${pcr_0}" diff --git a/scripts/load_validator_keys/load_validator_keys.py b/scripts/load_validator_keys/load_validator_keys.py index 4e105bb..77739fc 100644 --- a/scripts/load_validator_keys/load_validator_keys.py +++ b/scripts/load_validator_keys/load_validator_keys.py @@ -37,12 +37,16 @@ logger.addHandler(handler) logger.propagate = False +region = os.getenv("CDK_DEPLOY_REGION", "us-east-1") + kms_key_arn = os.getenv("KMS_KEY_ARN") table_name = os.getenv("DDB_TABLE_NAME") cf_stack_name = os.getenv("CF_STACK_NAME") -client_kms = boto3.client("kms") -dynamodb = boto3.resource("dynamodb") +client_kms = boto3.client(service_name="kms", + region_name=region) +dynamodb = boto3.resource(service_name="dynamodb", + region_name=region) words_list_path = "word_lists" @@ -50,7 +54,8 @@ def get_cloudformation_stack_id(cf_stack_name): """Get CF Stack ID""" - client = boto3.client(service_name="cloudformation") + client = boto3.client(service_name="cloudformation", + region_name=region) try: response = client.describe_stacks( @@ -78,12 +83,11 @@ def verify_keystore(credential: Credential, keystore: Keystore, password: str) - def 
main( - num_validators=5, - mnemonic_language="english", - chain="goerli", - eth1_withdrawal_address="0x6F4b46423fc6181a0cF34e6716c220BD4d6C2471", + num_validators=5, + mnemonic_language="english", + chain="sepolia", + eth1_withdrawal_address="0x6F4b46423fc6181a0cF34e6716c220BD4d6C2471", ) -> list: - if kms_key_arn is None: raise ValueError("Specify KMS_KEY_ARN environment variable") diff --git a/scripts/load_validator_keys/requirements.txt b/scripts/load_validator_keys/requirements.txt index d45a20d..bda8c99 100644 --- a/scripts/load_validator_keys/requirements.txt +++ b/scripts/load_validator_keys/requirements.txt @@ -1,22 +1,22 @@ -boto3>=1.26.14 ; python_version >= "3.9" and python_version < "3.10" -botocore>=1.29.14 ; python_version >= "3.9" and python_version < "3.10" -cached-property==1.5.2 ; python_version >= "3.9" and python_version < "3.10" -click==8.1.3 ; python_version >= "3.9" and python_version < "3.10" -cytoolz==0.11.2 ; python_version >= "3.9" and python_version < "3.10" -eth-hash==0.3.2 ; python_version >= "3.9" and python_version < "3.10" -eth-typing==2.3.0 ; python_version >= "3.9" and python_version < "3.10" -eth-utils==1.10.0 ; python_version >= "3.9" and python_version < "3.10" -jmespath==1.0.1 ; python_version >= "3.9" and python_version < "3.10" -lru-dict==1.1.7 ; python_version >= "3.9" and python_version < "3.10" -mypy-extensions==0.4.3 ; python_version >= "3.9" and python_version < "3.10" -py-ecc==5.2.0 ; python_version >= "3.9" and python_version < "3.10" -pycryptodome==3.14.1 ; python_version >= "3.9" and python_version < "3.10" -pyrsistent==0.16.1 ; python_version >= "3.9" and python_version < "3.10" -python-dateutil==2.8.2 ; python_version >= "3.9" and python_version < "3.10" -s3transfer==0.6.0 ; python_version >= "3.9" and python_version < "3.10" -six==1.16.0 ; python_version >= "3.9" and python_version < "3.10" -ssz==0.2.4 ; python_version >= "3.9" and python_version < "3.10" -staking-deposit @ 
git+https://github.com/ethereum/staking-deposit-cli.git@v2.3.0 ; python_version >= "3.9" and python_version < "3.10" -toolz==0.11.2 ; python_version >= "3.9" and python_version < "3.10" -urllib3==1.26.12 ; python_version >= "3.9" and python_version < "3.10" -cryptography==39.0.2 ; python_version >= "3.9" and python_version < "3.10" +boto3==1.34.14 +botocore==1.34.14 +cached-property==1.5.2 +click==8.1.7 +cytoolz==0.12.2 +eth-hash==0.5.2 +eth-typing==3.5.1 +eth-utils==2.3.1 +jmespath==1.0.1 +lru-dict==1.2.0 +mypy-extensions==1.0.0 +py-ecc==6.0.0 +pycryptodome==3.19.0 +pyrsistent>=0.16.0 +python-dateutil==2.8.2 +s3transfer==0.10.0 +six==1.16.0 +ssz==0.3.1 +staking-deposit @ git+https://github.com/ethereum/staking-deposit-cli.git@v2.8.0 +toolz==0.12.0 +urllib3<2.1.0 +cryptography==41.0.7 diff --git a/scripts/start_signing_service.sh b/scripts/start_signing_service.sh index 93c0b62..ae42da4 100755 --- a/scripts/start_signing_service.sh +++ b/scripts/start_signing_service.sh @@ -6,25 +6,49 @@ set +x output=${1} -# instance id +# get EC2 instance ids from autoscaling group stack_name=$(jq -r '. 
|= keys | .[0]' "${output}") asg_name=$(jq -r '."'${stack_name}'".ASGGroupName' "${output}") -web3signer_init_flag_param_name=$(jq -r '."'${stack_name}'"."Web3SignerInitFlagParamName"' "${output}") -instance_ids=$(./scripts/get_asg_instances.sh ${asg_name} | tr "\n" " ") +# get instance ids from autoscaling group, flatten the array +instance_ids=$(./scripts/get_asg_instances.sh "${asg_name}"| tr "\n" " ") + +# get SSM init flag parameter name from cdk output file +web3signer_init_flag_param_name=$(jq -r '."'${stack_name}'"."Web3SignerInitFlagParamName"' "${output}") -start_command_id=$(aws ssm send-command --region "${CDK_DEPLOY_REGION}" --document-name "AWS-RunShellScript" --instance-ids ${instance_ids} --parameters 'commands=["sudo systemctl start nitro-signing-server.service"]' | jq -r '.Command.CommandId') +# send start command to all EC2 instances at the same time +start_command_id=$(aws ssm send-command \ + --region "${CDK_DEPLOY_REGION}" \ + --document-name "AWS-RunShellScript" \ + --instance-ids ${instance_ids} \ + --parameters 'commands=["sudo systemctl start nitro-signing-server.service"]' | jq -r '.Command.CommandId') sleep 15 -status_command_id_hot=$(aws ssm send-command --region "${CDK_DEPLOY_REGION}" --document-name "AWS-RunShellScript" --instance-ids ${instance_ids} --parameters 'commands=["sudo systemctl status nitro-signing-server.service"]' | jq -r '.Command.CommandId') - -instance_ids_nl=$(echo ${instance_ids} | tr "\n " " ") +# send get systemd status command to all EC2 instances at the same time +status_command_id_hot=$(aws ssm send-command \ + --region "${CDK_DEPLOY_REGION}" \ + --document-name "AWS-RunShellScript" \ + --instance-ids ${instance_ids} \ + --parameters 'commands=["sudo systemctl status nitro-signing-server.service"]' | jq -r '.Command.CommandId') + +# iterate over instances in EC2 autoscaling group and get nitro-signing-server (systemd) status values requested before +instance_ids_nl=$(echo "${instance_ids}" | tr "\n " " ") for 
instance_id in ${instance_ids_nl}; do - status=$(aws ssm list-command-invocations --instance-id ${instance_id} --command-id ${status_command_id_hot} --details | jq -r '.CommandInvocations[0].CommandPlugins[0].Output') - echo "${instance_id}:" - echo ${status} + status=$(aws ssm list-command-invocations \ + --region "${CDK_DEPLOY_REGION}" \ + --instance-id ${instance_id} \ + --command-id "${status_command_id_hot}" \ + --details | jq -r '.CommandInvocations[0].CommandPlugins[0].Output') + echo "${instance_id}: ${status}" done -aws ssm put-parameter --name "${web3signer_init_flag_param_name}" --type "String" --value "true" --overwrite +# init flag validation optional at this point - needs to incorporated into watchdog to do auto systemd start +aws ssm put-parameter \ + --name "${web3signer_init_flag_param_name}" \ + --type "String" \ + --value "true" \ + --overwrite \ + --region "${CDK_DEPLOY_REGION}" \ + --no-cli-pager printf "\n%s\n" "($(date '+%d/%m/%Y %H:%M:%S')) service has been started and is healthy" \ No newline at end of file diff --git a/tests/e2e/cleanup.sh b/tests/e2e/cleanup.sh new file mode 100755 index 0000000..b3b9fd8 --- /dev/null +++ b/tests/e2e/cleanup.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +set -e +set +x + +source ${SCRIPT_DIR}/e2e.env + +cdk destroy devNitroValidator --force \ No newline at end of file diff --git a/tests/e2e/e2e.env b/tests/e2e/e2e.env new file mode 100644 index 0000000..784b127 --- /dev/null +++ b/tests/e2e/e2e.env @@ -0,0 +1,4 @@ +export CDK_DEPLOY_REGION=us-east-1 +export CDK_DEPLOY_ACCOUNT=$(aws sts get-caller-identity | jq -r '.Account') +# https://github.com/aws/aws-cdk/issues/30258 +export BUILDX_NO_DEFAULT_ATTESTATIONS=1 diff --git a/tests/e2e/e2e_setup.sh b/tests/e2e/e2e_setup.sh index 62c4447..64c457d 100755 --- a/tests/e2e/e2e_setup.sh +++ b/tests/e2e/e2e_setup.sh @@ -1,28 +1,70 @@ #!/usr/bin/env bash # Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) set -e -set -x +set +x -export CDK_DEPLOY_REGION=us-east-1 -export CDK_DEPLOY_ACCOUNT=$(aws sts get-caller-identity | jq -r '.Account') +source ${SCRIPT_DIR}/e2e.env +output_file="nitro_validator_output.json" + +printf "building kmstool_enclave_cli\n" ./scripts/build_kmstool_enclave_cli.sh -cdk deploy devNitroValidator -O nitro_validator_output.json --require-approval=never - -export CF_STACK_NAME=$(jq -r '. |= keys | .[0]' nitro_validator_output.json) -export KMS_KEY_ARN=$(jq -r ".$CF_STACK_NAME.KMSKeyARN" nitro_validator_output.json) -export DDB_TABLE_NAME=$(jq -r ".${CF_STACK_NAME}.ValidatorKeysTableName" nitro_validator_output.json) -export FUNCTION_ARN=$(jq -r ".${CF_STACK_NAME}.LambdaFunctionArn" nitro_validator_output.json) - -cd scripts/load_validator_keys -pip3 install -r requirements.txt -python3 load_validator_keys.py -cd ../.. - -./scripts/generate_key_policy.sh nitro_validator_output.json >key_policy.json -aws kms put-key-policy --policy-name default --key-id "${KMS_KEY_ARN}" --policy file://key_policy.json -aws lambda invoke --function-name "${FUNCTION_ARN}" --cli-binary-format raw-in-base64-out --payload '{"operation": "set_tls_key"}' lambda-output -./scripts/start_signing_service.sh nitro_validator_output.json -./tests/e2e/web3signer_status.sh nitro_validator_output.json + +printf "deploying cdk stack" +cdk deploy devNitroValidator -O "${output_file}" --require-approval=never + +export CF_STACK_NAME=$(jq -r '. 
|= keys | .[0]' "${output_file}") +export KMS_KEY_ARN=$(jq -r ".$CF_STACK_NAME.KMSKeyARN" "${output_file}") +export DDB_TABLE_NAME=$(jq -r ".${CF_STACK_NAME}.ValidatorKeysTableName" "${output_file}") +export FUNCTION_ARN=$(jq -r ".${CF_STACK_NAME}.LambdaFunctionArn" "${output_file}") + +# get SSM init flag parameter name from cdk output file - if init flag has been set to true already, skip validation key generation and set tls key step +web3signer_init_flag_param_name=$(jq -r ".$CF_STACK_NAME.Web3SignerInitFlagParamName" "${output_file}") + +# get aws ssm init parameter +init_flag=$(aws ssm get-parameter \ + --name "${web3signer_init_flag_param_name}" \ + --region "${CDK_DEPLOY_REGION}" \ + --no-cli-pager \ + --query "Parameter.Value" \ + --output text) + +# if init_flag is true, service has been started before, no key and tls key generation required +if [[ "${init_flag}" == "true" ]]; then + printf "init flag is already set to true, skipping validation key generation and setting tls key\n" +else + printf "loading validator keys\n" + cd scripts/load_validator_keys + pip3 install -r requirements.txt + python3 load_validator_keys.py + cd ../.. 
+ + printf "setting tls key\n" + aws lambda invoke --no-cli-pager \ + --function-name "${FUNCTION_ARN}" \ + --region "${CDK_DEPLOY_REGION}" \ + --cli-binary-format raw-in-base64-out \ + --payload '{"operation": "set_tls_key"}' lambda-output +fi + +printf "generating key policy\n" +./scripts/generate_key_policy.sh "${output_file}" >key_policy.json + +printf "putting key policy\n" +aws kms put-key-policy \ + --policy-name default \ + --key-id "${KMS_KEY_ARN}" \ + --policy file://key_policy.json \ + --region "${CDK_DEPLOY_REGION}" \ + --no-cli-pager + +sleep 5 +printf "starting signing service\n" +./scripts/start_signing_service.sh "${output_file}" + +sleep 20 +printf "checking web3signer status\n" +./tests/e2e/web3signer_status.sh "${output_file}" diff --git a/tests/e2e/web3signer_status.sh b/tests/e2e/web3signer_status.sh index 6442f22..5153834 100755 --- a/tests/e2e/web3signer_status.sh +++ b/tests/e2e/web3signer_status.sh @@ -19,13 +19,18 @@ function send_request() { printf "\n%s\n" "$(date '+%d/%m/%Y %H:%M:%S'): sending request" echo "${GENERIC_REQUEST}" | jq '.operation="'${1}'"' >.tmp.payload # $( echo ${payload} | jq -R -s '.') - aws lambda invoke --cli-binary-format raw-in-base64-out --function-name "${lambda_function_name}" --payload file://.tmp.payload .tmp.out + aws lambda invoke \ + --no-cli-pager \ + --cli-binary-format raw-in-base64-out \ + --region "${CDK_DEPLOY_REGION}" \ + --function-name "${lambda_function_name}" \ + --payload file://.tmp.payload .tmp.out echo "result: $(<.tmp.out)" rm -rf .tmp.out .tmp.payload } -while True; do +while true; do send_request "${STATUS_OPERATION}" send_request "${PUBLIC_KEYS_OPERATION}" - sleep 1 + sleep 5 done diff --git a/user_data/user_data.sh b/user_data/user_data.sh index 62bc31e..671cd0d 100644 --- a/user_data/user_data.sh +++ b/user_data/user_data.sh @@ -74,8 +74,10 @@ if [[ ! 
-d ./app/server ]]; then set -x set -e -account_id=$( aws sts get-caller-identity | jq -r '.Account' ) -region=$( curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r '.region' ) +token=$( curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600" ) +account_id=$( curl -H "X-aws-ec2-metadata-token: $token" http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r '.accountId' ) +region=$( curl -H "X-aws-ec2-metadata-token: $token" http://169.254.169.254/latest/meta-data/placement/region ) + aws ecr get-login-password --region $region | docker login --username AWS --password-stdin $account_id.dkr.ecr.$region.amazonaws.com docker pull ${__SIGNING_SERVER_IMAGE_URI__} docker pull ${__SIGNING_ENCLAVE_IMAGE_URI__} @@ -93,8 +95,8 @@ fi if [[ ! -f /etc/systemd/system/nitro-signing-server.service ]]; then - aws s3 cp ${__WATCHDOG_SYSTEMD_S3_URL__} /etc/systemd/system/nitro-signing-server.service - aws s3 cp ${__WATCHDOG_S3_URL__} /home/ec2-user/app/watchdog.py + aws --region ${__REGION__} s3 cp ${__WATCHDOG_SYSTEMD_S3_URL__} /etc/systemd/system/nitro-signing-server.service + aws --region ${__REGION__} s3 cp ${__WATCHDOG_S3_URL__} /home/ec2-user/app/watchdog.py chmod +x /home/ec2-user/app/watchdog.py @@ -111,5 +113,5 @@ if [[ $init_flag == "true" ]]; then fi # docker over system process manager -sudo docker run -d --restart unless-stopped --name http_server -p 8443:443 ${__SIGNING_SERVER_IMAGE_URI__} +docker run -d --restart unless-stopped --security-opt seccomp=unconfined --name http_server -p 8443:443 ${__SIGNING_SERVER_IMAGE_URI__} --//--