Merge pull request #1140 from dod-ccpo/azure-ci

Update CI and remove AWS config.
dandds 2019-10-28 11:34:25 -04:00 committed by GitHub
commit 380a9beb6f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 68 additions and 490 deletions

View File

@@ -1,8 +1,6 @@
version: 2.1
orbs:
aws-ecr: circleci/aws-ecr@6.3.0
aws-eks: circleci/aws-eks@0.1.0
azure-acr: circleci/azure-acr@0.1.2
azure-aks: circleci/azure-aks@0.2.0
kubernetes: circleci/kubernetes@0.3.0
@@ -11,11 +9,10 @@ defaults:
appEnvironment: &appEnvironment
KEEP_EXISTING_VENV: true
PGHOST: localhost
PGUSER: root
PGUSER: postgres
PGDATABASE: circle_test
REDIS_URI: redis://localhost:6379
PIP_VERSION: 18.*
CRL_STORAGE_PROVIDER: CLOUDFILES
commands:
migration_setup:
@@ -39,21 +36,21 @@ commands:
name: Apply Migrations and Seed Roles
jobs:
app_setup:
test:
docker:
- image: circleci/python:3.7.3-stretch-node
environment: *appEnvironment
- image: circleci/postgres:9.6.5-alpine-ram
- image: circleci/postgres:10-alpine-ram
- image: circleci/redis:4-alpine3.8
steps:
- checkout
- run: sudo apt-get update
- run: sudo apt-get install postgresql-client-9.6
- run: sudo apt-get install postgresql-client
- attach_workspace:
at: .
- run: ./script/setup
- save_cache:
name: "Save Cache: Pipenv Refrences"
name: "Save Cache: Pipenv References"
paths:
- ~/.local/share
key: pipenv-v1-{{ .Branch }}-{{ checksum "Pipfile.lock" }}
@@ -73,40 +70,79 @@ jobs:
- ./node_modules
key: node-v1-{{ .Branch }}-{{ checksum "yarn.lock" }}
- run:
name: "Update CRLs"
command: ./script/sync-crls
- run:
name: "Generate build info"
command: ./script/generate_build_info.sh
name: "Run Tests"
command: ./script/cibuild
- persist_to_workspace:
root: .
paths:
- .
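For reference, the test job above can be reproduced outside CI; a minimal sketch, assuming a local Postgres and Redis that match the appEnvironment values and using the repo's own scripts:

# Sketch only, not part of the config: run the same suite locally.
export PGHOST=localhost PGUSER=postgres PGDATABASE=circle_test
export REDIS_URI=redis://localhost:6379
./script/setup      # install Python and Node dependencies
./script/cibuild    # run the test suite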
test:
integration-tests:
docker:
- image: circleci/python:3.7.3-stretch-node
environment: *appEnvironment
- image: circleci/postgres:9.6.5-alpine-ram
- image: docker:18.06.0-ce-git
- image: circleci/postgres:10-alpine-ram
- image: circleci/redis:4-alpine3.8
steps:
- attach_workspace:
at: .
- run: sudo apt-get update
- run: sudo apt-get install postgresql-client-9.6
- setup_remote_docker:
version: 18.06.0-ce
- run:
name: "Run Tests"
command: ./script/cibuild
aws-migration:
executor: aws-eks/python3
steps:
- migration_setup:
container_image: "$AWS_ECR_ACCOUNT_URL/atat:atat-$CIRCLE_SHA1"
- aws-eks/update-kubeconfig-with-authenticator:
cluster-name: atat
aws-region: "${AWS_REGION}"
- migration_apply
name: Remove existing font symlink
command: rm static/fonts
- run:
name: Set up temporary docker network
command: docker network create atat
- run:
name: Build image
command: docker build . -t atat:latest
- run:
name: Start redis
command: docker run -d --network atat --link redis:redis -p 6379:6379 --name redis circleci/redis:4-alpine3.8
- run:
name: Start postgres
command: docker run -d --network atat --link postgres:postgres -p 5432:5432 --name postgres circleci/postgres:10-alpine-ram
- run:
name: Start application container
command: |
docker run -d \
-e DISABLE_CRL_CHECK=true \
-e PGHOST=postgres \
-e REDIS_URI=redis://redis:6379 \
-p 8000:8000 \
--network atat \
--name test-atat \
atat:latest \
uwsgi \
--callable app \
--module app \
--plugin python3 \
--virtualenv /opt/atat/atst/.venv \
--http-socket :8000
- run:
name: Wait for containers
command: sleep 3
- run:
name: Create database
command: docker exec postgres createdb -U postgres atat
- run:
name: Apply migrations
command: docker exec test-atat .venv/bin/python .venv/bin/alembic upgrade head
- run:
name: Apply the default permission sets
command: docker exec test-atat .venv/bin/python script/seed_roles.py
- run:
name: Execute Ghost Inspector test suite
command: |
docker pull ghostinspector/test-runner-standalone:latest
docker run \
-e NGROK_TOKEN=$NGROK_TOKEN \
-e GI_API_KEY=$GI_API_KEY \
-e GI_SUITE=$GI_SUITE \
-e GI_PARAMS_JSON='{}' \
-e APP_PORT="test-atat:8000" \
--network atat \
ghostinspector/test-runner-standalone:latest
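One fragile spot in the job above is the fixed sleep in the "Wait for containers" step; a readiness poll is a common alternative. A minimal sketch, assuming pg_isready is available inside the postgres container (it ships with the standard Postgres images):

# Sketch: block until Postgres accepts connections instead of sleeping for a fixed time.
for attempt in $(seq 1 30); do
  docker exec postgres pg_isready -U postgres && break
  sleep 1
done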
azure-migration:
executor: azure-aks/default
@@ -135,78 +171,11 @@ jobs:
- run: "docker tag ${AZURE_SERVER_NAME}/atat:atat-${CIRCLE_SHA1} ${AZURE_SERVER_NAME}/atat:latest"
- run: "docker push ${AZURE_SERVER_NAME}/atat:latest"
integration-tests:
docker:
- image: docker:17.05.0-ce-git
steps:
- setup_remote_docker:
version: 18.06.0-ce
- checkout
- run:
name: Set up temporary docker network
command: docker network create atat
- run:
name: Build image
command: docker build . -t atat:latest
- run:
name: Get storage containers
command: docker pull postgres:latest && docker pull redis:latest
- run:
name: Start redis
command: docker run -d --network atat --link redis:redis -p 6379:6379 --name redis redis:latest
- run:
name: Start postgres
command: docker run -d --network atat --link postgres:postgres -p 5432:5432 --name postgres postgres:latest
- run:
name: Start application container
command: |
docker run -d \
-e DISABLE_CRL_CHECK=true \
-e PGHOST=postgres \
-e REDIS_URI=redis://redis:6379 \
-p 8000:8000 \
--network atat \
--name test-atat \
atat:latest \
uwsgi \
--callable app \
--module app \
--plugin python3 \
--virtualenv /opt/atat/atst/.venv \
--http-socket :8000
- run:
name: Wait for containers
command: sleep 3
- run:
name: Create database
command: docker exec postgres createdb -U postgres atat
- run:
name: Apply migrations
command: docker exec test-atat .venv/bin/python .venv/bin/alembic upgrade head
- run:
name: Apply the default permission sets
command: docker exec test-atat .venv/bin/python script/seed_roles.py
- run:
name: Execute Ghost Inspector test suite
command: |
docker pull ghostinspector/test-runner-standalone:latest
docker run \
-e NGROK_TOKEN=$NGROK_TOKEN \
-e GI_API_KEY=$GI_API_KEY \
-e GI_SUITE=$GI_SUITE \
-e GI_PARAMS_JSON='{}' \
-e APP_PORT="test-atat:8000" \
--network atat \
ghostinspector/test-runner-standalone:latest
workflows:
version: 2
run-tests:
jobs:
- app_setup
- test:
requires:
- app_setup
- test
- integration-tests:
requires:
- test
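The reordered workflow (test, then integration-tests, then the Azure build and deploy jobs on master) can be sanity-checked before pushing; a sketch, assuming the CircleCI CLI is installed locally:

# Sketch: validate the config and expand orbs/anchors to inspect the resolved workflow.
circleci config validate .circleci/config.yml
circleci config process .circleci/config.yml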
@@ -266,62 +235,3 @@ workflows:
branches:
only:
- master
- aws-ecr/build-and-push-image:
extra-build-args: "--build-arg CSP=aws"
repo: atat
tag: "atat-${CIRCLE_SHA1},latest"
requires:
- integration-tests
filters:
branches:
only:
- master
- aws-migration:
requires:
- aws-ecr/build-and-push-image
filters:
branches:
only:
- master
- aws-eks/update-container-image:
cluster-name: atat
container-image-updates: "atst=${AWS_ECR_ACCOUNT_URL}/atat:atat-${CIRCLE_SHA1}"
namespace: atat
resource-name: deployment.apps/atst
aws-region: "${AWS_REGION}"
# uncomment below for debugging
# show-kubectl-command: true
requires:
- aws-migration
filters:
branches:
only:
- master
- aws-eks/update-container-image:
cluster-name: atat
container-image-updates: "atst-worker=${AWS_ECR_ACCOUNT_URL}/atat:atat-${CIRCLE_SHA1}"
namespace: atat
resource-name: deployment.apps/atst-worker
aws-region: "${AWS_REGION}"
# uncomment below for debugging
# show-kubectl-command: true
requires:
- aws-migration
filters:
branches:
only:
- master
- aws-eks/update-container-image:
cluster-name: atat
container-image-updates: "atst-beat=${AWS_ECR_ACCOUNT_URL}/atat:atat-${CIRCLE_SHA1}"
namespace: atat
resource-name: deployment.apps/atst-beat
aws-region: "${AWS_REGION}"
# uncomment below for debugging
# show-kubectl-command: true
requires:
- aws-migration
filters:
branches:
only:
- master

View File

@@ -1,19 +0,0 @@
#!/bin/sh
# script/alpine_setup: Adds all the system packages, directories, users, etc.
# required to run the application on Alpine
source "$(dirname "${0}")"/../script/include/global_header.inc.sh
# Set app specific items
APP_USER="atst"
APP_UID="8010"
# Add additional packages required by app dependencies
ADDITIONAL_PACKAGES="postgresql-libs python3 rsync uwsgi uwsgi-python3 uwsgi-logfile"
# add sync-crl cronjob for atst user
echo "1 */6 * * * /opt/atat/atst/script/sync-crls tests/crl-tmp" >> /etc/crontabs/atst
# Run the shared alpine setup script
source ./script/include/run_alpine_setup
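The cron entry above runs sync-crls at minute 1 of every sixth hour; a quick way to confirm it landed in a built image is to read the atst user's crontab (a sketch, using the test-atat container name from the CI config purely as an illustration):

# Sketch: Alpine keeps per-user crontabs under /etc/crontabs.
docker exec test-atat cat /etc/crontabs/atst
# expected entry: 1 */6 * * * /opt/atat/atst/script/sync-crls tests/crl-tmp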

View File

@@ -1,24 +0,0 @@
#!/bin/bash
# script/fix_permissions: Updates the app directory with the correct user
# permissions (skipping node_modules since it is not
# required and very large)
source "$(dirname "${0}")"/../script/include/global_header.inc.sh
APP_USER="${1}"
APP_GROUP="${2}"
if [ "${APP_USER}x" = "x" ] || [ "${APP_GROUP}x" = "x" ]; then
echo "ERROR: Missing username or groupname argument!"
echo "Received: *${APP_USER}:${APP_GROUP}*"
echo
exit 1
fi
chown "${APP_USER}:${APP_GROUP}" .
chown "${APP_USER}:${APP_GROUP}" ./*
for subdir in $(find . -type d -maxdepth 1 | grep -Ee '.[^/]' | grep -Fve 'node_modules')
do
chown "${APP_USER}:${APP_GROUP}" -R "${subdir}"
done
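The loop above filters find output through grep to skip node_modules; the same effect is available from find alone, which avoids the subshell word-splitting. A sketch, not taken from the repo:

# Sketch: chown every top-level directory except node_modules, pruned inside find.
find . -mindepth 1 -maxdepth 1 -type d ! -name node_modules \
  -exec chown -R "${APP_USER}:${APP_GROUP}" {} +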

View File

@@ -1,143 +0,0 @@
#!/bin/bash
#
# script/generate_build_info: Generates buildinfo.html and buildinfo.json and
# places them in a publicly accessible static asset
# folder
source "$(dirname "${0}")"/../script/include/global_header.inc.sh
# Config
APP_NAME="ATST"
STATIC_DIR="./static"
if [ "${CIRCLECI}" = "true" ]
then
# This is a CircleCI build
BUILD_NUMBER="${CIRCLE_BUILD_NUM}"
BUILD_STATUS_URL="${CIRCLE_BUILD_URL}"
BUILT_BY="CircleCI"
CIRCLECI_WORKFLOW_BASEURL="https://circleci.com/workflow-run"
GIT_BRANCH="${CIRCLE_BRANCH}"
WORKFLOW_ID="${CIRCLE_WORKFLOW_ID}"
WORKFLOW_STATUS_URL="${CIRCLECI_WORKFLOW_BASEURL}/${CIRCLE_WORKFLOW_ID}"
else
# Assume we're running on TravisCI instead
BUILD_NUMBER="${TRAVIS_BUILD_ID}"
BUILD_STATUS_URL="https://travis-ci.org/$TRAVIS_REPO_SLUG/builds/$TRAVIS_BUILD_ID"
BUILT_BY="TravisCI"
GIT_BRANCH="${TRAVIS_BRANCH}"
WORKFLOW_ID="N/A"
WORKFLOW_STATUS_URL="#"
fi
echo "### Generate Build Info ###"
echo "Gathering info from git..."
COMMIT_AUTHOR=$(git log -1 --pretty=%aN)
COMMIT_AUTHOR_EMAIL=$(git log -1 --pretty=%aE)
GIT_SHA=$(git rev-parse HEAD)
# Escape all double quotes in commit message and switch newlines for \n
# (for JSON compatibility)
COMMIT_MESSAGE_JSON=$(git log -1 --pretty=format:%B | sed -e 's#\([^\\]\)"#\1\\"#g' | awk 1 ORS='\\n')
# Escape all < and > characters in commit message and trade newlines for <BR/> tags
COMMIT_MESSAGE_HTML=$(git log -1 --pretty=format:%B | sed -e 's#>#&gt;#g' | sed -e 's#<#&lt;#g' | awk 1 ORS='<BR/>')
# Assemble https based git repo url
GIT_REMOTE_URL=$(git config --get remote.origin.url)
if [[ ${GIT_REMOTE_URL} =~ "@" ]]
then
GIT_URL="https://github.com/$(echo "${GIT_REMOTE_URL}" | cut -d ':' -f 2)"
else
GIT_URL="${GIT_REMOTE_URL}"
fi
# Drop the trailing .git for generating github links
GITHUB_BASE_URL="${GIT_URL%.git}"
GITHUB_COMMIT_URL="${GITHUB_BASE_URL}/commit/${GIT_SHA}"
APP_CONTAINER_CREATE_DATE=$(date '+%Y-%m-%d')
APP_CONTAINER_CREATE_TIME=$(date '+%H:%M:%S')
echo "Generating ${STATIC_DIR}/buildinfo.json ..."
cat > ${STATIC_DIR}/buildinfo.json <<ENDJSON
{
"build_info" : {
"project_name" : "${APP_NAME}",
"build_id" : "${BUILD_NUMBER}",
"build_url" : "${BUILD_STATUS_URL}",
"built_by" : "${BUILT_BY}",
"workflow_id" : "${WORKFLOW_ID}",
"workflow_url" : "${WORKFLOW_STATUS_URL}"
},
"image_info" : {
"create_date" : "${APP_CONTAINER_CREATE_DATE}",
"create_time" : "${APP_CONTAINER_CREATE_TIME}"
},
"git_info" : {
"repository_url" : "${GIT_URL}",
"branch" : "${GIT_BRANCH}",
"commit" : {
"sha" : "${GIT_SHA}",
"github_commit_url" : "${GITHUB_COMMIT_URL}",
"author_name" : "${COMMIT_AUTHOR}",
"author_email" : "${COMMIT_AUTHOR_EMAIL}",
"message" : "${COMMIT_MESSAGE_JSON}"
}
}
}
ENDJSON
echo "Generating ${STATIC_DIR}/buildinfo.html ..."
cat > ${STATIC_DIR}/buildinfo.html <<ENDHTML
<HTML>
<HEAD>
<TITLE>${APP_NAME} build ${BUILD_NUMBER} info</TITLE>
<STYLE>
table {
display: table;
border-width: 1px;
border-color: green;
border-spacing: 0px;
}
td {
padding: 5px;
vertical-align: top;
}
td.label {
text-align: right;
font-weight: bold;
}
</STYLE>
</HEAD>
<BODY>
<TABLE border="1">
<TR>
<TH colspan="2">BuildInfo (${BUILT_BY})</TH>
</TR>
<TR>
<TD class="label">Container Image Creation Time:</TD>
<TD>${APP_CONTAINER_CREATE_DATE} ${APP_CONTAINER_CREATE_TIME}</TD>
</TR>
<TR>
<TD class="label">Build Number:</TD>
<TD><A target="_blank" href="${BUILD_STATUS_URL}">${BUILD_NUMBER}</A></TD>
</TR>
<TR>
<TD class="label">Workflow Number:</TD>
<TD><A target="_blank" href="${WORKFLOW_STATUS_URL}">${WORKFLOW_ID}</A></TD>
</TR>
<TR>
<TD class="label">Commit SHA:</TD>
<TD><A target="_blank" href="${GITHUB_COMMIT_URL}">${GIT_SHA}</A></TD>
</TR>
<TR>
<TD class="label">Commit Author:</TD>
<TD>${COMMIT_AUTHOR} &lt;${COMMIT_AUTHOR_EMAIL}&gt;</TD>
</TR>
<TR>
<TD class="label">Commit Message:</TD>
<TD>${COMMIT_MESSAGE_HTML}</TD>
</TR>
</TABLE>
</BODY>
</HTML>
ENDHTML
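The least obvious step in this script is the remote-URL rewrite near the top; a worked example with an illustrative SSH remote (hypothetical values) shows what the cut and the later %.git strip produce:

# Sketch with an illustrative remote; the org/repo values are hypothetical.
GIT_REMOTE_URL="git@github.com:example-org/example-repo.git"
GIT_URL="https://github.com/$(echo "${GIT_REMOTE_URL}" | cut -d ':' -f 2)"
echo "${GIT_URL}"        # https://github.com/example-org/example-repo.git
echo "${GIT_URL%.git}"   # https://github.com/example-org/example-repo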

View File

@@ -1,27 +0,0 @@
#!/bin/bash
# script/get_crl_expiry: Will print the names and expiration dates
# for CRLs that exist in a given ATAT namespace.
# usage: `script/get_crl_expiry [NAMESPACE]`
# defaults to `atat` for the namespace
# You must have a valid k8s config for the ATAT clusters to run
# this. Keep in mind it parses every CRL so it is slow.
if [[ $# -eq 0 ]]; then
NAMESPACE=atat
else
NAMESPACE=$1
fi
# we only need to run these commands against one existing pod
ATST_POD=$(kubectl -n ${NAMESPACE} get pods -l app=atst -o custom-columns=NAME:.metadata.name --no-headers | sed -n 1p)
echo "expiration information for $NAMESPACE namespace, pod $ATST_POD"
for i in $(kubectl -n $NAMESPACE exec $ATST_POD -c atst -- ls crls); do
expiry=$(kubectl -n $NAMESPACE exec $ATST_POD -c atst -- cat crls/$i | \
openssl crl -inform def -noout -text | \
grep "Next Update" | \
sed -E "s/ +Next Update: //g")
echo "$i: $expiry";
done
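The same openssl parsing works against a CRL file on disk, without kubectl; openssl can also print the field directly. A sketch, assuming a DER-encoded CRL at a hypothetical path:

# Sketch: print the expiry of a single local CRL file.
openssl crl -inform DER -in crls/example.crl -noout -nextupdate
# prints a line like: nextUpdate=Nov  4 17:00:00 2019 GMT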

View File

@@ -1,42 +0,0 @@
#!/bin/bash
# script/make-test-cac: Set up a test CAC card.
# Usage:
# ./script/make-test-cac [DOD identifier string] [user email] [output name]
# i.e.:
# ./script/make-test-cac JONES.ANDY.1234567890 andy@example.com andy
# The script will output 3 files:
# 1. The certificate (crt) file (for reference)
# 2. The certificate key (key) file (also for reference)
# 3. The PFX file, which is the package file that needs to be loaded on the PIVKey brand card
set -e
SAN="subjectAltName=email:$2"
openssl genrsa -out $3.key 2048
CSR=$(openssl req \
-new \
-nodes \
-subj "/CN=$1" \
-reqexts SAN \
-config <(cat /etc/ssl/openssl.cnf; echo '[SAN]'; echo $SAN) \
-key $3.key )
openssl x509 \
-req \
-in <(echo "$CSR") \
-days 365 \
-CA "ssl/client-certs/client-ca.crt" \
-CAkey "ssl/client-certs/client-ca.key" \
-CAcreateserial \
-extensions SAN \
-extfile <(cat /etc/ssl/openssl.cnf; echo '[SAN]'; echo $SAN) \
-out $3.crt
openssl pkcs12 -passout pass: -export -out $3.pfx -inkey $3.key -in $3.crt
echo "Generated files:"
echo " CERT: $3.crt"
echo " KEY: $3.key"
echo " PFX: $3.pfx"

View File

@@ -1,11 +0,0 @@
#!/bin/bash
# script/rq_worker: Launch the Flask-RQ worker
source "$(dirname "${0}")"/../script/include/global_header.inc.sh
# Before starting the server, apply any pending migrations to the DB
migrate_db
# Launch the worker
run_command "flask rq worker"

View File

@@ -1,53 +0,0 @@
#!/bin/bash
# script/selenium_test: Run selenium tests via BrowserStack
source "$(dirname "${0}")"/../script/include/global_header.inc.sh
export FLASK_ENV=selenium
# create upload directory for app
mkdir uploads | true
# Fetch postgres settings and set them as ENV vars
source ./script/get_db_settings
if [ -n "${PGDATABASE}" ]; then
echo "Resetting database ${PGDATABASE}..."
# Reset the db
reset_db "${PGDATABASE}"
else
echo "ERROR: RESET_DB is set, but PGDATABASE is not!"
echo "Skipping database reset..."
fi
BSL_FILE=BrowserStackLocal
if [[ `uname` == "Darwin" ]]; then
BSL_DOWNLOAD="https://www.browserstack.com/browserstack-local/BrowserStackLocal-darwin-x64.zip"
else
BSL_DOWNLOAD="https://www.browserstack.com/browserstack-local/BrowserStackLocal-linux-x64.zip"
fi
# Fetch BrowserStackLocal script
if [ -e "${BSL_FILE}" ]; then
echo "BrowserStack file already exists"
else
echo "downloading BrowserStack file"
curl $BSL_DOWNLOAD --output $BSL_FILE.zip
unzip $BSL_FILE.zip -d .
rm $BSL_FILE.zip
chmod u+x $BSL_FILE
fi
# run BrowserStackLocal in the background
echo "starting BrowserStack local client..."
./$BSL_FILE --key $BROWSERSTACK_TOKEN &
BSL_ID=$!
trap "kill $BSL_ID" SIGTERM SIGINT EXIT
# run example selenium script that fetches the home page
echo "running selenium tests"
pipenv run pytest tests/acceptance -s --no-cov
# kill BrowserStackLocal
kill $BSL_ID
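Starting BrowserStackLocal with & and trapping the PID can race with the first test; the binary also provides a daemon mode that blocks until the tunnel is established. A sketch, assuming the stock BrowserStackLocal flags:

# Sketch: daemon mode waits for the tunnel before returning control.
./BrowserStackLocal --key "$BROWSERSTACK_TOKEN" --daemon start
pipenv run pytest tests/acceptance -s --no-cov
./BrowserStackLocal --key "$BROWSERSTACK_TOKEN" --daemon stop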

View File

@@ -1,13 +0,0 @@
#!/bin/bash
# script/uwsgi_server: Launch the UWSGI server
source "$(dirname "${0}")"/../script/include/global_header.inc.sh
# Before starting the server, apply any pending migrations to the DB
migrate_db
seed_db
# Launch UWSGI
run_command "uwsgi --ini ${UWSGI_CONFIG_FULLPATH}"