diff --git a/.github/scripts/exit-standby-all-chain-nodes.sh b/.github/scripts/exit-standby-all-chain-nodes.sh
new file mode 100755
index 00000000..3fad2155
--- /dev/null
+++ b/.github/scripts/exit-standby-all-chain-nodes.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -x
+
+# get the ec2 instance ids of all nodes for the specified chain id
+chain_node_instance_ids=$(aws ec2 describe-instances --filters "Name=tag:KavaChainId,Values=$CHAIN_ID" | jq -r '[.Reservations | .[] | .Instances | .[] | .InstanceId] | join(" ")')
+
+for chain_node_instance_id in ${chain_node_instance_ids}
+do
+    autoscaling_group_state=$(aws autoscaling describe-auto-scaling-instances --instance-ids "$chain_node_instance_id" | jq -r '[.AutoScalingInstances | .[].LifecycleState] | join(" ")')
+    # Possible states: https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-lifecycle.html
+    case "$autoscaling_group_state" in
+        Standby)
+            # exit standby to make the node eligible for receiving
+            # requests from the target groups for the public load balancer
+            autoscaling_group_name=$(aws autoscaling describe-auto-scaling-instances --instance-ids "$chain_node_instance_id" | jq -r '[.AutoScalingInstances | .[].AutoScalingGroupName] | join(" ")')
+
+            aws autoscaling exit-standby \
+                --instance-ids "$chain_node_instance_id" \
+                --auto-scaling-group-name "$autoscaling_group_name"
+            ;;
+        *)
+            echo "instance ($chain_node_instance_id) not in an eligible state ($autoscaling_group_state) for exiting standby, skipping"
+            ;;
+    esac
+done
diff --git a/.github/scripts/put-all-chain-nodes-on-standby.sh b/.github/scripts/put-all-chain-nodes-on-standby.sh
new file mode 100755
index 00000000..591e64d0
--- /dev/null
+++ b/.github/scripts/put-all-chain-nodes-on-standby.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+set -x
+
+# get the ec2 instance ids of all nodes for the specified chain id
+chain_node_instance_ids=$(aws ec2 describe-instances --filters "Name=tag:KavaChainId,Values=$CHAIN_ID" | jq -r '[.Reservations | .[] | .Instances | .[] | .InstanceId] | join(" ")')
+
+for chain_node_instance_id in ${chain_node_instance_ids}
+do
+    autoscaling_group_state=$(aws autoscaling describe-auto-scaling-instances --instance-ids "$chain_node_instance_id" | jq -r '[.AutoScalingInstances | .[].LifecycleState] | join(" ")')
+    # Possible states: https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-lifecycle.html
+    case "$autoscaling_group_state" in
+        InService)
+            # place the nodes on standby so they won't get terminated
+            # by the autoscaling group during the time
+            # they are offline for a deploy / upgrade
+            autoscaling_group_name=$(aws autoscaling describe-auto-scaling-instances --instance-ids "$chain_node_instance_id" | jq -r '[.AutoScalingInstances | .[].AutoScalingGroupName] | join(" ")')
+
+            aws autoscaling enter-standby \
+                --instance-ids "$chain_node_instance_id" \
+                --auto-scaling-group-name "$autoscaling_group_name" \
+                --should-decrement-desired-capacity
+            ;;
+        *)
+            echo "instance ($chain_node_instance_id) not in an eligible state ($autoscaling_group_state) for going on standby, skipping"
+            ;;
+    esac
+done
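Both standby scripts assume the aws CLI and jq are on PATH and that CHAIN_ID plus AWS credentials are exported. A minimal sketch of how the same lookups can be run by hand to confirm the KavaChainId tag filter matches the expected instances and to inspect each node's lifecycle state before or after a deploy; the chain id shown is only the example value used elsewhere in this PR:

export CHAIN_ID=demo_2221-17000   # example value from cd-internal-testnet.yml
# list the instances the standby scripts would operate on
instance_ids=$(aws ec2 describe-instances \
    --filters "Name=tag:KavaChainId,Values=$CHAIN_ID" \
    | jq -r '.Reservations[].Instances[].InstanceId')
# print each instance's autoscaling lifecycle state (InService, Standby, ...)
for id in ${instance_ids}
do
    state=$(aws autoscaling describe-auto-scaling-instances \
        --instance-ids "$id" \
        | jq -r '.AutoScalingInstances[].LifecycleState')
    echo "$id: ${state:-not attached to an autoscaling group}"
done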
diff --git a/.github/workflows/cd-internal-testnet.yml b/.github/workflows/cd-internal-testnet.yml
index 96577437..fd8b5251 100644
--- a/.github/workflows/cd-internal-testnet.yml
+++ b/.github/workflows/cd-internal-testnet.yml
@@ -2,11 +2,41 @@ name: Continuous Deployment (Internal Testnet)
 # run after every successful CI job of new commits to the master branch
 on:
   workflow_run:
-    workflows: [Continuous Integration (Kava Master), Continuous Integration (Commit)]
+    workflows: [Continuous Integration (Kava Master)]
     types:
       - completed
+
 jobs:
-  # run default ci checks against master branch
-  no-op:
-    if: ${{ github.event.workflow_run.conclusion == 'success' }}
-    uses: ./.github/workflows/ci-lint.yml
+  # in order:
+  # enter standby (prevents autoscaling group from killing node during deploy)
+  # stop kava
+  # take ebs + zfs snapshots
+  # download updated binary and genesis
+  # reset application database state (only done on internal testnet)
+  reset-chain-to-zero-state:
+    uses: ./.github/workflows/cd-reset-internal-testnet.yml
+    with:
+      aws-region: us-east-1
+      chain-id: demo_2221-17000
+      ssm-document-name: kava-demonet-node-update
+      playbook-name: reset-internal-testnet-playbook.yml
+      playbook-infrastructure-branch: ls-deploy-testnet-for-cd-prototyping
+    secrets: inherit
+
+  # start kava with the new binary and genesis state on api, peer, and seed nodes; place nodes in service once they start and are synced to live
+  start-chain-api:
+    uses: ./.github/workflows/cd-start-chain.yml
+    with:
+      aws-region: us-east-1
+      chain-id: demo_2221-17000
+      ssm-document-name: kava-demonet-node-update
+      playbook-name: start-chain-api-playbook.yml
+      playbook-infrastructure-branch: ls-deploy-testnet-for-cd-prototyping
+    secrets: inherit
+    needs: [reset-chain-to-zero-state]
+
+  # set up test and development accounts and balances, deploy contracts by calling the chain's api
+  seed-chain-state:
+    uses: ./.github/workflows/cd-seed-chain.yml
+    secrets: inherit
+    needs: [start-chain-api]
diff --git a/.github/workflows/cd-reset-internal-testnet.yml b/.github/workflows/cd-reset-internal-testnet.yml
new file mode 100644
index 00000000..bdabd073
--- /dev/null
+++ b/.github/workflows/cd-reset-internal-testnet.yml
@@ -0,0 +1,82 @@
+name: Reset Internal Testnet
+
+on:
+  workflow_call:
+    inputs:
+      chain-id:
+        required: true
+        type: string
+      aws-region:
+        required: true
+        type: string
+      ssm-document-name:
+        required: true
+        type: string
+      playbook-name:
+        required: true
+        type: string
+      playbook-infrastructure-branch:
+        required: true
+        type: string
+    secrets:
+      CI_AWS_KEY_ID:
+        required: true
+      CI_AWS_KEY_SECRET:
+        required: true
+      KAVA_PRIVATE_GITHUB_ACCESS_TOKEN:
+        required: true
+
+# in order:
+# enter standby (prevents autoscaling group from killing node during deploy)
+# stop kava
+# download updated binary and genesis
+# reset application database state (only done on internal testnet)
jobs:
+  place-chain-nodes-on-standby:
+    runs-on: ubuntu-latest
+    steps:
+      - name: checkout repo from current commit
+        uses: actions/checkout@v3
+      - name: take the chain offline
+        run: bash ${GITHUB_WORKSPACE}/.github/scripts/put-all-chain-nodes-on-standby.sh
+        env:
+          CHAIN_ID: ${{ inputs.chain-id }}
+          AWS_REGION: ${{ inputs.aws-region }}
+          AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_KEY_ID }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_KEY_SECRET }}
+      - name: checkout infrastructure repo
+        uses: actions/checkout@v3
+        with:
+          repository: Kava-Labs/infrastructure
+          token: ${{ secrets.KAVA_PRIVATE_GITHUB_ACCESS_TOKEN }}
+          path: infrastructure
+          ref: ls-deploy-testnet-for-cd-prototyping
+      - name: set up Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: '1.18'
+          check-latest: true
+          cache: true
+      - name: build kava node updater
+        run: cd infrastructure/cli/kava-node-updater && make install && cd ../../../
+      - name: run reset playbook on all chain nodes
+        run: |
+          kava-node-updater \
+            --debug \
+            --max-retries=2 \
+            --aws-ssm-document-name=$SSM_DOCUMENT_NAME \
+            --infrastructure-git-pointer=$PLAYBOOK_INFRASTRUCTURE_BRANCH \
+            --update-playbook-filename=$PLAYBOOK_NAME \
+            --chain-id=$CHAIN_ID \
+            --max-upgrade-batch-size=0 \
+            --node-states=Standby \
+            --wait-for-node-sync-after-upgrade=false
+        env:
+          SSM_DOCUMENT_NAME: ${{ inputs.ssm-document-name }}
+          PLAYBOOK_NAME: ${{ inputs.playbook-name }}
+          CHAIN_ID: ${{ inputs.chain-id }}
+          AWS_REGION: ${{ inputs.aws-region }}
+          AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_KEY_ID }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_KEY_SECRET }}
+          AWS_SDK_LOAD_CONFIG: 1
+          PLAYBOOK_INFRASTRUCTURE_BRANCH: ${{ inputs.playbook-infrastructure-branch }}
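For reference, this is the command the "run reset playbook on all chain nodes" step expands to when cd-internal-testnet.yml supplies its inputs. It is a sketch only, assuming kava-node-updater has already been installed from the private infrastructure repo and that the AWS credentials, region, and AWS_SDK_LOAD_CONFIG variables from the workflow's env block are exported:

kava-node-updater \
    --debug \
    --max-retries=2 \
    --aws-ssm-document-name=kava-demonet-node-update \
    --infrastructure-git-pointer=ls-deploy-testnet-for-cd-prototyping \
    --update-playbook-filename=reset-internal-testnet-playbook.yml \
    --chain-id=demo_2221-17000 \
    --max-upgrade-batch-size=0 \
    --node-states=Standby \
    --wait-for-node-sync-after-upgrade=false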
diff --git a/.github/workflows/cd-seed-chain.yml b/.github/workflows/cd-seed-chain.yml
new file mode 100644
index 00000000..8c00305d
--- /dev/null
+++ b/.github/workflows/cd-seed-chain.yml
@@ -0,0 +1,11 @@
+name: Seed Chain
+
+on:
+  workflow_call:
+
+jobs:
+  no-op:
+    runs-on: ubuntu-latest
+    steps:
+      - name: no-op
+        run: echo "Hello World"
diff --git a/.github/workflows/cd-start-chain.yml b/.github/workflows/cd-start-chain.yml
new file mode 100644
index 00000000..09980b62
--- /dev/null
+++ b/.github/workflows/cd-start-chain.yml
@@ -0,0 +1,80 @@
+name: Start Chain
+
+on:
+  workflow_call:
+    inputs:
+      chain-id:
+        required: true
+        type: string
+      aws-region:
+        required: true
+        type: string
+      ssm-document-name:
+        required: true
+        type: string
+      playbook-name:
+        required: true
+        type: string
+      playbook-infrastructure-branch:
+        required: true
+        type: string
+    secrets:
+      CI_AWS_KEY_ID:
+        required: true
+      CI_AWS_KEY_SECRET:
+        required: true
+      KAVA_PRIVATE_GITHUB_ACCESS_TOKEN:
+        required: true
+
+jobs:
+  # start kava, allow nodes to start processing requests from users once they are synced to live
+  serve-traffic:
+    runs-on: ubuntu-latest
+    steps:
+      - name: checkout repo from current commit
+        uses: actions/checkout@v3
+      - name: take the chain offline
+        run: bash ${GITHUB_WORKSPACE}/.github/scripts/put-all-chain-nodes-on-standby.sh
+        env:
+          CHAIN_ID: ${{ inputs.chain-id }}
+          AWS_REGION: ${{ inputs.aws-region }}
+          AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_KEY_ID }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_KEY_SECRET }}
+      - name: checkout infrastructure repo
+        uses: actions/checkout@v3
+        with:
+          repository: Kava-Labs/infrastructure
+          token: ${{ secrets.KAVA_PRIVATE_GITHUB_ACCESS_TOKEN }}
+          path: infrastructure
+          ref: ls-deploy-testnet-for-cd-prototyping
+      - name: set up Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: '1.18'
+          check-latest: true
+          cache: true
+      - name: build kava node updater
+        run: cd infrastructure/cli/kava-node-updater && make install && cd ../../../
+      - name: run start-chain playbook on all chain nodes
+        run: |
+          kava-node-updater \
+            --debug \
+            --max-retries=2 \
+            --aws-ssm-document-name=$SSM_DOCUMENT_NAME \
+            --infrastructure-git-pointer=$PLAYBOOK_INFRASTRUCTURE_BRANCH \
+            --update-playbook-filename=$PLAYBOOK_NAME \
+            --chain-id=$CHAIN_ID \
+            --max-upgrade-batch-size=0 \
+            --node-states=Standby \
+            --wait-for-node-sync-after-upgrade=true
+        env:
+          SSM_DOCUMENT_NAME: ${{ inputs.ssm-document-name }}
+          PLAYBOOK_NAME: ${{ inputs.playbook-name }}
+          CHAIN_ID: ${{ inputs.chain-id }}
+          AWS_REGION: ${{ inputs.aws-region }}
+          AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_KEY_ID }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_KEY_SECRET }}
+          AWS_SDK_LOAD_CONFIG: 1
+          PLAYBOOK_INFRASTRUCTURE_BRANCH: ${{ inputs.playbook-infrastructure-branch }}
+      - name: bring the chain online
+        run: bash ${GITHUB_WORKSPACE}/.github/scripts/exit-standby-all-chain-nodes.sh
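The start-chain playbook runs with --wait-for-node-sync-after-upgrade=true, so nodes should be caught up to live before the final exit-standby step returns them to the public load balancer. A hypothetical spot check against a node's Tendermint RPC endpoint; the host is a placeholder (this PR does not define any endpoint names) and 26657 is only the conventional Tendermint RPC port:

# placeholder host; substitute a real internal testnet node address
curl -s "http://<internal-testnet-node-host>:26657/status" \
    | jq '.result.sync_info.catching_up'
# expected output once the node is synced to live: false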