Merge branch 'staging' of github-DDS:dod-ccpo/atst into gi-updates-wo-20191216
commit 18cfffef46

1 .github/CODEOWNERS vendored Normal file

@@ -0,0 +1 @@
terraform/* @dandds
.secrets.baseline

@@ -3,7 +3,7 @@
    "files": "^.secrets.baseline$|^.*pgsslrootcert.yml$",
    "lines": null
  },
-  "generated_at": "2019-12-06T21:22:07Z",
+  "generated_at": "2019-12-13T20:38:57Z",
  "plugins_used": [
    {
      "base64_limit": 4.5,
@@ -170,7 +170,7 @@
      "hashed_secret": "e4f14805dfd1e6af030359090c535e149e6b4207",
      "is_secret": false,
      "is_verified": false,
-      "line_number": 656,
+      "line_number": 659,
      "type": "Hex High Entropy String"
    }
  ]
Dockerfile

@@ -84,8 +84,7 @@ COPY --from=builder /install/celery_worker.py ./celery_worker.py
COPY --from=builder /install/config/ ./config/
COPY --from=builder /install/templates/ ./templates/
COPY --from=builder /install/translations.yaml .
-COPY --from=builder /install/script/seed_roles.py ./script/seed_roles.py
-COPY --from=builder /install/script/sync-crls ./script/sync-crls
+COPY --from=builder /install/script/ ./script/
COPY --from=builder /install/static/ ./static/
COPY --from=builder /install/fixtures/ ./fixtures
COPY --from=builder /install/uwsgi.ini .
@@ -0,0 +1,27 @@
"""add application name and portfolio_id unique constraint

Revision ID: c487d91f1a26
Revises: 3bd8552f1c57
Create Date: 2019-12-13 14:33:23.952450

"""
from alembic import op


# revision identifiers, used by Alembic.
revision = 'c487d91f1a26'  # pragma: allowlist secret
down_revision = '3bd8552f1c57'  # pragma: allowlist secret
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_unique_constraint('applications_name_portfolio_id_key', 'applications', ['name', 'portfolio_id'])
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint('applications_name_portfolio_id_key', 'applications', type_='unique')
    # ### end Alembic commands ###
atst/domain/applications.py

@@ -11,7 +11,7 @@ from atst.models import (
    ApplicationRoleStatus,
    EnvironmentRole,
)
-from atst.utils import first_or_none
+from atst.utils import first_or_none, update_or_raise_already_exists_error


class Applications(BaseDomainClass):
@@ -28,7 +28,7 @@ class Applications(BaseDomainClass):
        if environment_names:
            Environments.create_many(user, application, environment_names)

-        db.session.commit()
+        update_or_raise_already_exists_error(message="application")
        return application

    @classmethod
@@ -53,9 +53,9 @@ class Applications(BaseDomainClass):
            Environments.create_many(
                g.current_user, application, new_data["environment_names"]
            )
-        db.session.add(application)
-        db.session.commit()

+        db.session.add(application)
+        update_or_raise_already_exists_error(message="application")
        return application

    @classmethod
@@ -530,7 +530,6 @@ class AzureCloudProvider(CloudProviderInterface):
    ):
        sub_client = self.sdk.subscription.SubscriptionClient(credentials)

        display_name = f"{environment.application.name}_{environment.name}_{environment.id}"  # proposed format
        billing_profile_id = "?"  # where do we source this?
        sku_id = AZURE_SKU_ID
        # These 2 seem like something that might be worthwhile to allow tiebacks to
@@ -1,11 +1,10 @@
import datetime
-from sqlalchemy.exc import IntegrityError

from atst.database import db
from atst.models.clin import CLIN
from atst.models.task_order import TaskOrder, SORT_ORDERING
from . import BaseDomainClass
-from .exceptions import AlreadyExistsError
+from atst.utils import update_or_raise_already_exists_error


class TaskOrders(BaseDomainClass):
@@ -16,15 +15,8 @@ class TaskOrders(BaseDomainClass):
    def create(cls, portfolio_id, number, clins, pdf):
        task_order = TaskOrder(portfolio_id=portfolio_id, number=number, pdf=pdf)
        db.session.add(task_order)

-        try:
-            db.session.commit()
-        except IntegrityError:
-            db.session.rollback()
-            raise AlreadyExistsError("task_order")
-
+        update_or_raise_already_exists_error(message="task_order")
        TaskOrders.create_clins(task_order.id, clins)

        return task_order

    @classmethod
@@ -42,12 +34,7 @@ class TaskOrders(BaseDomainClass):
        task_order.number = number
        db.session.add(task_order)

-        try:
-            db.session.commit()
-        except IntegrityError:
-            db.session.rollback()
-            raise AlreadyExistsError("task_order")
-
+        update_or_raise_already_exists_error(message="task_order")
        return task_order

    @classmethod
@@ -1,4 +1,4 @@
-from sqlalchemy import and_, Column, ForeignKey, String
+from sqlalchemy import and_, Column, ForeignKey, String, UniqueConstraint
from sqlalchemy.orm import relationship, synonym

from atst.models.base import Base
@@ -34,6 +34,11 @@ class Application(
        ),
    )
    members = synonym("roles")
+    __table_args__ = (
+        UniqueConstraint(
+            "name", "portfolio_id", name="applications_name_portfolio_id_key"
+        ),
+    )

    @property
    def users(self):
@@ -2,6 +2,7 @@ from flask import redirect, render_template, request as http_request, url_for, g

from .blueprint import applications_bp
from atst.domain.applications import Applications
+from atst.domain.exceptions import AlreadyExistsError
from atst.domain.portfolios import Portfolios
from atst.forms.application import NameAndDescriptionForm, EnvironmentsForm
from atst.domain.authz.decorator import user_can_access_decorator as user_can
@@ -37,6 +38,31 @@ def render_new_application_form(
    return render_template(template, **render_args)


+def update_application(form, application_id=None, portfolio_id=None):
+    if form.validate():
+        application = None
+        try:
+            if application_id:
+                application = Applications.get(application_id)
+                application = Applications.update(application, form.data)
+                flash("application_updated", application_name=application.name)
+            else:
+                portfolio = Portfolios.get_for_update(portfolio_id)
+                application = Applications.create(
+                    g.current_user, portfolio, **form.data
+                )
+                flash("application_created", application_name=application.name)
+
+            return application
+
+        except AlreadyExistsError:
+            flash("application_name_error", name=form.data["name"])
+            return False
+
+    else:
+        return False
+
+
@applications_bp.route("/portfolios/<portfolio_id>/applications/new")
@applications_bp.route("/applications/<application_id>/new/step_1")
@user_can(Permissions.CREATE_APPLICATION, message="view create new application form")
@@ -64,17 +90,9 @@ def create_or_update_new_application_step_1(portfolio_id=None, application_id=None):
    form = get_new_application_form(
        {**http_request.form}, NameAndDescriptionForm, application_id
    )
+    application = update_application(form, application_id, portfolio_id)

-    if form.validate():
-        application = None
-        if application_id:
-            application = Applications.get(application_id)
-            application = Applications.update(application, form.data)
-            flash("application_updated", application_name=application.name)
-        else:
-            portfolio = Portfolios.get_for_update(portfolio_id)
-            application = Applications.create(g.current_user, portfolio, **form.data)
-            flash("application_created", application_name=application.name)
+    if application:
        return redirect(
            url_for(
                "applications.update_new_application_step_2",
@@ -1,5 +1,10 @@
import re

+from sqlalchemy.exc import IntegrityError
+
+from atst.database import db
+from atst.domain.exceptions import AlreadyExistsError
+

def first_or_none(predicate, lst):
    return next((x for x in lst if predicate(x)), None)
@@ -23,3 +28,11 @@ def camel_to_snake(camel_cased):
def pick(keys, dct):
    _keys = set(keys)
    return {k: v for (k, v) in dct.items() if k in _keys}
+
+
+def update_or_raise_already_exists_error(message):
+    try:
+        db.session.commit()
+    except IntegrityError:
+        db.session.rollback()
+        raise AlreadyExistsError(message)
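For orientation (this example is not part of the diff), a domain method now leans on the database's unique constraint and this helper rather than pre-checking for duplicates; the function below and the model import path are illustrative:

```
# Illustrative sketch: create a record and let the unique constraint decide.
from atst.database import db
from atst.models.application import Application  # import path assumed
from atst.utils import update_or_raise_already_exists_error


def create_application(portfolio, name):
    application = Application(portfolio=portfolio, name=name)
    db.session.add(application)
    # Commits the session; on IntegrityError (e.g. a duplicate name within
    # the portfolio) it rolls back and raises AlreadyExistsError("application").
    update_or_raise_already_exists_error(message="application")
    return application
```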
@@ -64,6 +64,11 @@ MESSAGES = {
        "message_template": "You have successfully updated the permissions for {{ user_name }}",
        "category": "success",
    },
+    "application_name_error": {
+        "title_template": "",
+        "message_template": """{{ 'flash.application.name_error.message' | translate({ 'name': name }) }}""",
+        "category": "error",
+    },
    "ccpo_user_added": {
        "title_template": translate("flash.success"),
        "message_template": "You have successfully given {{ user_name }} CCPO permissions.",
deploy/overlays/staging/kustomization.yml

@@ -3,6 +3,7 @@ bases:
  - ../../azure/
resources:
  - namespace.yml
+  - reset-cron-job.yml
patchesStrategicMerge:
  - replica_count.yml
  - ports.yml

46 deploy/overlays/staging/reset-cron-job.yml Normal file
@@ -0,0 +1,46 @@
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: reset-db
  namespace: atat
spec:
  schedule: "0 4 * * *"
  concurrencyPolicy: Replace
  successfulJobsHistoryLimit: 1
  jobTemplate:
    spec:
      template:
        metadata:
          labels:
            app: atst
            role: reset-db
            aadpodidbinding: atat-kv-id-binding
        spec:
          restartPolicy: OnFailure
          containers:
            - name: reset
              image: $CONTAINER_IMAGE
              command: [
                "/bin/sh", "-c"
              ]
              args: [
                "/opt/atat/atst/.venv/bin/python",
                "/opt/atat/atst/script/reset_database.py"
              ]
              envFrom:
                - configMapRef:
                    name: atst-worker-envvars
              volumeMounts:
                - name: flask-secret
                  mountPath: "/config"
          volumes:
            - name: flask-secret
              flexVolume:
                driver: "azure/kv"
                options:
                  usepodidentity: "true"
                  keyvaultname: "atat-vault-test"
                  keyvaultobjectnames: "staging-AZURE-STORAGE-KEY;staging-MAIL-PASSWORD;staging-PGPASSWORD;staging-REDIS-PASSWORD;staging-SECRET-KEY"
                  keyvaultobjectaliases: "AZURE_STORAGE_KEY;MAIL_PASSWORD;PGPASSWORD;REDIS_PASSWORD;SECRET_KEY"
                  keyvaultobjecttypes: "secret;secret;secret;secret;key"
                  tenantid: $TENANT_ID
script/cibuild

@@ -1,4 +1,5 @@
#!/bin/bash
+set -e

# script/cibuild: Run CI related checks and tests
1 terraform/.gitignore vendored Normal file

@@ -0,0 +1 @@
.terraform
84 terraform/README.md Normal file

@@ -0,0 +1,84 @@
# ATAT Terraform
Welcome! You've found the ATAT IaC configurations.

ATAT is configured using terraform and a wrapper script called `secrets-tool`. With `terraform` we can configure infrastructure in a programmatic way and ensure consistency across environments.

## Directory Structure

**modules/** - Terraform modules. These are modules that can be re-used for multiple environments.

**providers/** - Specific environment configurations (dev, production, etc.).

# Setup
Install the following requirements.

I highly recommend [tfenv](https://github.com/tfutils/tfenv), which will help you manage versions of TF and install new ones as needed. It gives you the ability to switch back and forth between versions as necessary, especially when doing upgrades and managing multiple environments. Think of it like `pyenv`.

Python is required for the `secrets-tool`. It is used to wrap terraform and pass secrets into terraform from Azure KeyVault. This approach avoids leaving secrets on the filesystem in any way and allows for restricting access to secrets to specific operators.
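As an illustration of that wrapping approach, here is a minimal sketch (not the actual `secrets-tool` implementation) using the `azure-identity` and `azure-keyvault-secrets` packages; the vault URL and secret names are placeholders:

```
# Hypothetical sketch: read secrets from KeyVault and hand them to terraform
# as TF_VAR_* environment variables, so nothing touches the filesystem.
import os
import subprocess

from azure.identity import DefaultAzureCredential
from azure.keyvault.secrets import SecretClient


def run_terraform_with_secrets(vault_url, secret_names, tf_args):
    client = SecretClient(vault_url=vault_url, credential=DefaultAzureCredential())
    env = dict(os.environ)
    for name in secret_names:
        # terraform reads TF_VAR_<variable_name> from the environment
        env["TF_VAR_" + name.replace("-", "_")] = client.get_secret(name).value
    subprocess.run(["terraform"] + list(tf_args), env=env, check=True)


# e.g. run_terraform_with_secrets(
#     "https://atat-vault-test.vault.azure.net",  # placeholder vault URL
#     ["administrator-login-password"],           # placeholder secret name
#     ["plan"],
# )
```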
Azure CLI is necessary for creating some initial resources, but is also used by the Python Azure SDK to make calls in some cases.

Requirements:
- [tfenv](https://github.com/tfutils/tfenv)
- Python 3.7
- Python pip
- Python virtualenv # FIXME: Switch to `pipenv`
- [azure cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest)

# tfenv
`tfenv` will allow you to install TF versions. For example:

```
tfenv install 0.12.18
```
_0.12.18 at time of writing_

To select a version to use:
```
tfenv use 0.12.18
```

# Running Terraform
First, you'll need to log in to Azure. With the Azure CLI installed, you can run the following.

```
az login
```

Next, you'll need to initialize the environment. This process pulls down the terraform provider module from GitHub as well as pulls in the modules that will be used by this provider/environment setup.

```
cd providers/dev/
terraform init
```

Once initialized, you can run a plan. A `plan` compares the terraform definitions you have configured in the provider directory (e.g. `providers/dev`) with what is in the shared state file in Azure Object Storage (which all providers are currently configured for). It then also compares that to the state of the services which are running in Azure.
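To run one:

```
terraform plan
```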
If nothing has been applied, you'll see all the resources defined in terraform as all new, with a `+` next to the resource name. If the resource exists, but has changed, you'll see a `~` next to the resource and the delta of the change to be applied.

If your plan looks good, you can run the apply.
```
terraform apply
```

Check the output for errors. Sometimes the syntax is valid, but some of the configuration may be wrong and only rejected by the Azure API at run time. If this is the case, fix your mistake and re-run.

# Shutting down an environment
To shut down and remove an environment completely, so as not to incur any costs, you would need to run a `terraform destroy`.

```
terraform destroy
```

**This will destroy all resources defined in the provider, so use with caution!! This will include things like KeyVault, Postgres, and so on. You may lose data!!**

# Advanced Terraform
## Targeted Apply
Sometimes you're writing a new module and don't want to make changes to anything else. In this case you can limit what TF changes.

```
terraform plan -target=module.vpc
```

In the above example, this will only run a plan (plan/apply/destroy) on the specified module. This can be a module or resource. You can get a list of modules and resources by running `terraform show`.
35 terraform/modules/k8s/main.tf Normal file

@@ -0,0 +1,35 @@
resource "azurerm_resource_group" "k8s" {
  name     = "${var.name}-${var.environment}-vpc"
  location = var.region
}

resource "azurerm_kubernetes_cluster" "k8s" {
  name                = "${var.name}-${var.environment}-k8s"
  location            = azurerm_resource_group.k8s.location
  resource_group_name = azurerm_resource_group.k8s.name
  dns_prefix          = var.k8s_dns_prefix

  service_principal {
    client_id     = "f05a4457-bd5e-4c63-98e1-89aab42645d0"
    client_secret = "19b69e2c-9f55-4850-87cb-88c67a8dc811"
  }

  default_node_pool {
    name            = "default"
    vm_size         = "Standard_D1_v2"
    os_disk_size_gb = 30
    vnet_subnet_id  = var.vnet_subnet_id
    node_count      = 1
  }

  lifecycle {
    ignore_changes = [
      default_node_pool.0.node_count
    ]
  }

  tags = {
    environment = var.environment
    owner       = var.owner
  }
}
0 terraform/modules/k8s/outputs.tf Normal file

35 terraform/modules/k8s/variables.tf Normal file

@@ -0,0 +1,35 @@
variable "region" {
  type        = string
  description = "Region this module and resources will be created in"
}

variable "name" {
  type        = string
  description = "Unique name for the services in this module"
}

variable "environment" {
  type        = string
  description = "Environment these resources reside (prod, dev, staging, etc)"
}

variable "owner" {
  type        = string
  description = "Owner of the environment and resources created in this module"
}

variable "k8s_dns_prefix" {
  type        = string
  description = "A DNS prefix"
}

variable "k8s_node_size" {
  type        = string
  description = "The size of the instance to use in the node pools for k8s"
  default     = "Standard_A1_v2"
}

variable "vnet_subnet_id" {
  description = "Subnet to use for the default k8s pool"
  type        = string
}
40 terraform/modules/keyvault/main.tf Normal file

@@ -0,0 +1,40 @@
data "azurerm_client_config" "current" {}

resource "azurerm_resource_group" "keyvault" {
  name     = "${var.name}-${var.environment}-rg"
  location = var.region
}

resource "azurerm_key_vault" "keyvault" {
  name                = "${var.name}-${var.environment}-keyvault"
  location            = azurerm_resource_group.keyvault.location
  resource_group_name = azurerm_resource_group.keyvault.name
  tenant_id           = data.azurerm_client_config.current.tenant_id

  sku_name = "premium"

  tags = {
    environment = var.environment
    owner       = var.owner
  }
}

resource "azurerm_key_vault_access_policy" "keyvault" {
  key_vault_id = azurerm_key_vault.keyvault.id

  tenant_id = "b5ab0e1e-09f8-4258-afb7-fb17654bc5b3"
  object_id = "2ca63d41-d058-4e06-aef6-eb517a53b631"

  key_permissions = [
    "get",
    "list",
    "create",
  ]

  secret_permissions = [
    "get",
    "list",
    "set",
  ]
}
24 terraform/modules/keyvault/variables.tf Normal file

@@ -0,0 +1,24 @@
variable "region" {
  type        = string
  description = "Region this module and resources will be created in"
}

variable "name" {
  type        = string
  description = "Unique name for the services in this module"
}

variable "environment" {
  type        = string
  description = "Environment these resources reside (prod, dev, staging, etc)"
}

variable "owner" {
  type        = string
  description = "Owner of this environment"
}

variable "tenant_id" {
  type        = string
  description = "The Tenant ID"
}
37 terraform/modules/postgres/main.tf Normal file

@@ -0,0 +1,37 @@
resource "azurerm_resource_group" "sql" {
  name     = "${var.name}-${var.environment}-postgres"
  location = var.region
}

resource "azurerm_postgresql_server" "sql" {
  name                = "${var.name}-${var.environment}-sql"
  location            = azurerm_resource_group.sql.location
  resource_group_name = azurerm_resource_group.sql.name

  sku {
    name     = var.sku_name
    capacity = var.sku_capacity
    tier     = var.sku_tier
    family   = var.sku_family
  }

  storage_profile {
    storage_mb            = var.storage_mb
    backup_retention_days = var.storage_backup_retention_days
    geo_redundant_backup  = var.storage_geo_redundant_backup
    auto_grow             = var.storage_auto_grow
  }

  administrator_login          = var.administrator_login
  administrator_login_password = var.administrator_login_password
  version                      = var.postgres_version
  ssl_enforcement              = var.ssl_enforcement
}

resource "azurerm_postgresql_virtual_network_rule" "sql" {
  name                                 = "${var.name}-${var.environment}-rule"
  resource_group_name                  = azurerm_resource_group.sql.name
  server_name                          = azurerm_postgresql_server.sql.name
  subnet_id                            = var.subnet_id
  ignore_missing_vnet_service_endpoint = true
}
0 terraform/modules/postgres/outputs.tf Normal file

100 terraform/modules/postgres/variables.tf Normal file

@@ -0,0 +1,100 @@
variable "region" {
  type        = string
  description = "Region this module and resources will be created in"
}

variable "name" {
  type        = string
  description = "Unique name for the services in this module"
}

variable "environment" {
  type        = string
  description = "Environment these resources reside (prod, dev, staging, etc)"
}

variable "owner" {
  type        = string
  description = "Owner of the environment and resources created in this module"
}

variable "subnet_id" {
  type        = string
  description = "Subnet the SQL server should run"
}

variable "sku_name" {
  type        = string
  description = "SKU name"
  default     = "GP_Gen5_2"
}

variable "sku_capacity" {
  type        = string
  description = "SKU Capacity"
  default     = "2"
}

variable "sku_tier" {
  type        = string
  description = "SKU Tier"
  default     = "GeneralPurpose"
}

variable "sku_family" {
  type        = string
  description = "SKU Family"
  default     = "Gen5"
}

variable "storage_mb" {
  type        = string
  description = "Size in MB of the storage used for the sql server"
  default     = "5120"
}

variable "storage_backup_retention_days" {
  type        = string
  description = "Storage backup retention (days)"
  default     = "7"
}

variable "storage_geo_redundant_backup" {
  type        = string
  description = "Geographic redundant backup (Enabled/Disabled)"
  default     = "Disabled"
}

variable "storage_auto_grow" {
  type        = string
  description = "Auto Grow? (Enabled/Disabled)"
  default     = "Enabled"
}

variable "administrator_login" {
  type        = string
  description = "Administrator login"
  default     = "sqladmindude" # FIXME - Remove with wrapper using KeyVault
}

variable "administrator_login_password" {
  type        = string
  description = "Administrator password"
  default     = "eI0l7yswwtuhHpwzoVjwRKdAcuGNsg" # FIXME - Remove with wrapper using KeyVault
}

variable "postgres_version" {
  type        = string
  description = "Postgres version to use"
  default     = "11"
}

variable "ssl_enforcement" {
  type        = string
  description = "Enforce SSL (Enabled/Disable)"
  default     = "Enabled"
}
72 terraform/modules/vpc/main.tf Normal file

@@ -0,0 +1,72 @@
resource "azurerm_resource_group" "vpc" {
  name     = "${var.name}-${var.environment}-vpc"
  location = var.region

  tags = {
    environment = var.environment
    owner       = var.owner
  }
}

resource "azurerm_network_ddos_protection_plan" "vpc" {
  count               = var.ddos_enabled
  name                = "${var.name}-${var.environment}-ddos"
  location            = azurerm_resource_group.vpc.location
  resource_group_name = azurerm_resource_group.vpc.name
}

resource "azurerm_virtual_network" "vpc" {
  name                = "${var.name}-${var.environment}-network"
  location            = azurerm_resource_group.vpc.location
  resource_group_name = azurerm_resource_group.vpc.name
  address_space       = ["${var.virtual_network}"]
  dns_servers         = var.dns_servers

  tags = {
    environment = var.environment
    owner       = var.owner
  }
}

resource "azurerm_subnet" "subnet" {
  for_each             = var.networks
  name                 = "${var.name}-${var.environment}-${each.key}"
  resource_group_name  = azurerm_resource_group.vpc.name
  virtual_network_name = azurerm_virtual_network.vpc.name
  address_prefix       = element(split(",", each.value), 0)

  # See https://github.com/terraform-providers/terraform-provider-azurerm/issues/3471
  lifecycle {
    ignore_changes = [route_table_id]
  }

  #delegation {
  #  name = "acctestdelegation"
  #
  #  service_delegation {
  #    name    = "Microsoft.ContainerInstance/containerGroups"
  #    actions = ["Microsoft.Network/virtualNetworks/subnets/action"]
  #  }
  #}
}

resource "azurerm_route_table" "route_table" {
  for_each            = var.route_tables
  name                = "${var.name}-${var.environment}-${each.key}"
  location            = azurerm_resource_group.vpc.location
  resource_group_name = azurerm_resource_group.vpc.name
}

resource "azurerm_subnet_route_table_association" "route_table" {
  for_each       = var.networks
  subnet_id      = azurerm_subnet.subnet[each.key].id
  route_table_id = azurerm_route_table.route_table[each.key].id
}

resource "azurerm_route" "route" {
  for_each            = var.route_tables
  name                = "${var.name}-${var.environment}-default"
  resource_group_name = azurerm_resource_group.vpc.name
  route_table_name    = azurerm_route_table.route_table[each.key].name
  address_prefix      = "0.0.0.0/0"
  next_hop_type       = each.value
}

3 terraform/modules/vpc/outputs.tf Normal file

@@ -0,0 +1,3 @@
output "subnets" {
  value = azurerm_subnet.subnet["private"].id #FIXME - output should be a map
}
43 terraform/modules/vpc/variables.tf Normal file

@@ -0,0 +1,43 @@
variable "environment" {
  description = "Environment (Prod,Dev,etc)"
}

variable "region" {
  description = "Region (useast2, etc)"
}

variable "name" {
  description = "Name or prefix to use for all resources created by this module"
}

variable "owner" {
  description = "Owner of these resources"
}

variable "ddos_enabled" {
  description = "Enable or disable DDoS Protection (1,0)"
  default     = "0"
}

variable "virtual_network" {
  description = "The supernet used for this VPC a.k.a Virtual Network"
  type        = string
}

variable "networks" {
  description = "A map of lists describing the network topology"
  type        = map
}

variable "dns_servers" {
  description = "DNS Server IPs for internal and public DNS lookups (must be on a defined subnet)"
  type        = list
}

variable "route_tables" {
  type        = map
  description = "A map with the route tables to create"
}
11 terraform/providers/dev/k8s.tf Normal file

@@ -0,0 +1,11 @@
module "k8s" {
  source         = "../../modules/k8s"
  region         = var.region
  name           = var.name
  environment    = var.environment
  owner          = var.owner
  k8s_dns_prefix = var.k8s_dns_prefix
  k8s_node_size  = var.k8s_node_size
  vnet_subnet_id = module.vpc.subnets #FIXME - output from module.vpc.subnets should be map
}
8 terraform/providers/dev/keyvault.tf Normal file

@@ -0,0 +1,8 @@
module "keyvault" {
  source      = "../../modules/keyvault"
  name        = var.name
  region      = var.region
  owner       = var.owner
  environment = var.environment
  tenant_id   = var.tenant_id
}

8 terraform/providers/dev/postgres.tf Normal file

@@ -0,0 +1,8 @@
module "sql" {
  source      = "../../modules/postgres"
  name        = var.name
  owner       = var.owner
  environment = var.environment
  region      = var.region
  subnet_id   = module.vpc.subnets # FIXME - Should be a map of subnets and specify private
}
17 terraform/providers/dev/provider.tf Normal file

@@ -0,0 +1,17 @@
provider "azurerm" {
  version = "=1.38.0"
}

provider "azuread" {
  # Whilst version is optional, we /strongly recommend/ using it to pin the version of the Provider being used
  version = "=0.7.0"
}

terraform {
  backend "azurerm" {
    resource_group_name  = "cloudzero-dev-tfstate"
    storage_account_name = "cloudzerodevtfstate"
    container_name       = "tfstate"
    key                  = "dev.terraform.tfstate"
  }
}
61 terraform/providers/dev/variables.tf Normal file

@@ -0,0 +1,61 @@
variable "environment" {
  default = "dev"
}

variable "region" {
  default = "eastus2"
}

variable "owner" {
  default = "dev"
}

variable "name" {
  default = "cloudzero"
}

variable "virtual_network" {
  type    = string
  default = "10.1.0.0/16"
}

variable "networks" {
  type = map
  default = {
    #format
    #name = "CIDR, route table, Security Group Name"
    public  = "10.1.1.0/24,public"  # LBs
    private = "10.1.2.0/24,private" # k8s, postgres, redis, dns, ad
  }
}

variable "route_tables" {
  description = "Route tables and their default routes"
  type        = map
  default = {
    public  = "Internet"
    private = "VnetLocal"
  }
}

variable "dns_servers" {
  type    = list
  default = ["10.1.2.4", "10.1.2.5"]
}

variable "k8s_node_size" {
  type    = string
  default = "Standard_A1_v2"
}

variable "k8s_dns_prefix" {
  type    = string
  default = "atat"
}

variable "tenant_id" {
  type    = string
  default = "b5ab0e1e-09f8-4258-afb7-fb17654bc5b3"
}
12 terraform/providers/dev/vpc.tf Normal file

@@ -0,0 +1,12 @@
module "vpc" {
  source          = "../../modules/vpc/"
  environment     = var.environment
  region          = var.region
  virtual_network = var.virtual_network
  networks        = var.networks
  route_tables    = var.route_tables
  owner           = var.owner
  name            = var.name
  dns_servers     = var.dns_servers
}
@@ -5,7 +5,7 @@ from atst.models import CSPRole, ApplicationRoleStatus
from atst.domain.application_roles import ApplicationRoles
from atst.domain.applications import Applications
from atst.domain.environment_roles import EnvironmentRoles
-from atst.domain.exceptions import NotFoundError
+from atst.domain.exceptions import AlreadyExistsError, NotFoundError
from atst.domain.permission_sets import PermissionSets

from tests.factories import (
@@ -177,3 +177,22 @@ def test_invite_to_nonexistent_environment():
            {"environment_id": uuid4(), "role": CSPRole.BASIC_ACCESS.value},
        ],
    )
+
+
+def test_create_does_not_duplicate_names_within_portfolio():
+    portfolio = PortfolioFactory.create()
+    name = "An Awesome Application"
+
+    assert Applications.create(portfolio.owner, portfolio, name, "")
+    with pytest.raises(AlreadyExistsError):
+        Applications.create(portfolio.owner, portfolio, name, "")
+
+
+def test_update_does_not_duplicate_names_within_portfolio():
+    portfolio = PortfolioFactory.create()
+    name = "An Awesome Application"
+    application = ApplicationFactory.create(portfolio=portfolio, name=name)
+    dupe_application = ApplicationFactory.create(portfolio=portfolio)
+
+    with pytest.raises(AlreadyExistsError):
+        Applications.update(dupe_application, {"name": name})
@@ -1,4 +1,5 @@
import pytest
+import random
from uuid import uuid4

from atst.domain.exceptions import NotFoundError, UnauthorizedError
@@ -97,7 +98,7 @@ def test_scoped_portfolio_returns_all_applications_for_portfolio_admin(
        Applications.create(
            portfolio.owner,
            portfolio,
-            "My Application",
+            "My Application %s" % (random.randrange(1, 1000)),
            "My application",
            ["dev", "staging", "prod"],
        )
@@ -120,7 +121,7 @@ def test_scoped_portfolio_returns_all_applications_for_portfolio_owner(
        Applications.create(
            portfolio.owner,
            portfolio,
-            "My Application",
+            "My Application %s" % (random.randrange(1, 1000)),
            "My application",
            ["dev", "staging", "prod"],
        )
@@ -70,6 +70,24 @@ def test_post_name_and_description_for_update(client, session, user_session):
    assert application.description == "This is only a test"


+def test_post_name_and_description_enforces_unique_name(client, user_session, session):
+    portfolio = PortfolioFactory.create()
+    name = "Test Application"
+    application = ApplicationFactory.create(portfolio=portfolio, name=name)
+    user_session(portfolio.owner)
+
+    session.begin_nested()
+    response = client.post(
+        url_for(
+            "applications.create_new_application_step_1", portfolio_id=portfolio.id
+        ),
+        data={"name": name, "description": "This is only a test"},
+    )
+    session.rollback()
+
+    assert response.status_code == 400
+
+
def test_get_environments(client, user_session):
    application = ApplicationFactory.create()
    user_session(application.portfolio.owner)
@@ -1,6 +1,7 @@
from unittest.mock import Mock

import pytest
+import random

from flask import url_for, Response

@@ -264,26 +265,28 @@ def test_applications_post_application_step_1(post_url_assert_status):
    rando = user_with()
    portfolio = PortfolioFactory.create(owner=owner)
    application = ApplicationFactory.create(portfolio=portfolio)
-    step_1_form_data = {
-        "name": "Test Application",
-        "description": "This is only a test",
-    }
+
+    def _form_data():
+        return {
+            "name": "Test Application %s" % (random.randrange(1, 1000)),
+            "description": "This is only a test",
+        }

    url = url_for(
        "applications.create_new_application_step_1", portfolio_id=portfolio.id
    )
-    post_url_assert_status(ccpo, url, 302, data=step_1_form_data)
-    post_url_assert_status(owner, url, 302, data=step_1_form_data)
-    post_url_assert_status(rando, url, 404, data=step_1_form_data)
+    post_url_assert_status(ccpo, url, 302, data=_form_data())
+    post_url_assert_status(owner, url, 302, data=_form_data())
+    post_url_assert_status(rando, url, 404, data=_form_data())

    url = url_for(
        "applications.update_new_application_step_1",
        portfolio_id=portfolio.id,
        application_id=application.id,
    )
-    post_url_assert_status(ccpo, url, 302, data=step_1_form_data)
-    post_url_assert_status(owner, url, 302, data=step_1_form_data)
-    post_url_assert_status(rando, url, 404, data=step_1_form_data)
+    post_url_assert_status(ccpo, url, 302, data=_form_data())
+    post_url_assert_status(owner, url, 302, data=_form_data())
+    post_url_assert_status(rando, url, 404, data=_form_data())


# applications.view_new_application_step_2
translations.yaml

@@ -114,6 +114,8 @@ flash:
      message: '{application_name} has been successfully created. You may continue on to provision environments and assign team members now, or come back and complete these tasks at a later time.'
    updated: 'You have successfully updated the {application_name} application.'
    deleted: 'You have successfully deleted the {application_name} application. To view the retained activity log, visit the portfolio administration page.'
+    name_error:
+      message: 'The application name {name} has already been used in this portfolio. Please enter a unique name.'
    delete_member_success: 'You have successfully deleted {member_name} from the portfolio.'
    deleted_member: Portfolio member deleted
    environment_added: 'The environment "{env_name}" has been added to the application.'