Merge branch 'staging' into portfolio-data-as-dict

This commit is contained in:
tomdds 2020-02-11 13:17:18 -05:00 committed by GitHub
commit 2ed5c5b588
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
169 changed files with 1253 additions and 3937 deletions

View File

@ -3,7 +3,7 @@
"files": "^.secrets.baseline$|^.*pgsslrootcert.yml$",
"lines": null
},
"generated_at": "2020-01-27T19:24:43Z",
"generated_at": "2020-02-10T21:40:38Z",
"plugins_used": [
{
"base64_limit": 4.5,
@ -82,7 +82,7 @@
"hashed_secret": "afc848c316af1a89d49826c5ae9d00ed769415f3",
"is_secret": false,
"is_verified": false,
"line_number": 32,
"line_number": 43,
"type": "Secret Keyword"
}
],

View File

@ -101,5 +101,7 @@ RUN mkdir /var/run/uwsgi && \
chown -R atst:atat /var/run/uwsgi && \
chown -R atst:atat "${APP_DIR}"
RUN update-ca-certificates
# Run as the unprivileged APP user
USER atst

View File

@ -0,0 +1,30 @@
"""change to environment_roles.cloud_Id
Revision ID: 418b52c1cedf
Revises: 542bd3215dec
Create Date: 2020-02-05 13:40:37.870183
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '418b52c1cedf' # pragma: allowlist secret
down_revision = '542bd3215dec' # pragma: allowlist secret
branch_labels = None
depends_on = None
def upgrade():
    """Add ``environment_roles.cloud_id`` and drop the old ``csp_user_id``."""
    # New rows start with a NULL cloud_id; it is populated during provisioning.
    op.add_column(
        "environment_roles", sa.Column("cloud_id", sa.String(), nullable=True)
    )
    # The CSP-specific user id column is superseded by cloud_id.
    op.drop_column("environment_roles", "csp_user_id")
def downgrade():
    """Restore ``environment_roles.csp_user_id`` and drop ``cloud_id``."""
    restored_column = sa.Column(
        "csp_user_id", sa.VARCHAR(), autoincrement=False, nullable=True
    )
    op.add_column("environment_roles", restored_column)
    op.drop_column("environment_roles", "cloud_id")

View File

@ -0,0 +1,264 @@
"""state machine stage added.
Revision ID: 542bd3215dec
Revises: 567bfb019a87
Create Date: 2020-02-06 12:01:58.077840
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "542bd3215dec" # pragma: allowlist secret
down_revision = "567bfb019a87" # pragma: allowlist secret
branch_labels = None
depends_on = None
# All portfolio FSM states AFTER this revision. This migration widens the
# "fsmstates" enum with the PRODUCT_PURCHASE(_VERIFICATION)_* and
# INITIAL_MGMT_GROUP(_VERIFICATION)_* stages.
#
# The original auto-generated migration repeated this ~50-entry literal four
# times (type_/existing_type in both upgrade() and downgrade()); the lists are
# declared once here and the pre-revision list is derived, so they cannot
# drift apart.
NEW_STATES = (
    "UNSTARTED",
    "STARTING",
    "STARTED",
    "COMPLETED",
    "FAILED",
    "TENANT_CREATED",
    "TENANT_IN_PROGRESS",
    "TENANT_FAILED",
    "BILLING_PROFILE_CREATION_CREATED",
    "BILLING_PROFILE_CREATION_IN_PROGRESS",
    "BILLING_PROFILE_CREATION_FAILED",
    "BILLING_PROFILE_VERIFICATION_CREATED",
    "BILLING_PROFILE_VERIFICATION_IN_PROGRESS",
    "BILLING_PROFILE_VERIFICATION_FAILED",
    "BILLING_PROFILE_TENANT_ACCESS_CREATED",
    "BILLING_PROFILE_TENANT_ACCESS_IN_PROGRESS",
    "BILLING_PROFILE_TENANT_ACCESS_FAILED",
    "TASK_ORDER_BILLING_CREATION_CREATED",
    "TASK_ORDER_BILLING_CREATION_IN_PROGRESS",
    "TASK_ORDER_BILLING_CREATION_FAILED",
    "TASK_ORDER_BILLING_VERIFICATION_CREATED",
    "TASK_ORDER_BILLING_VERIFICATION_IN_PROGRESS",
    "TASK_ORDER_BILLING_VERIFICATION_FAILED",
    "BILLING_INSTRUCTION_CREATED",
    "BILLING_INSTRUCTION_IN_PROGRESS",
    "BILLING_INSTRUCTION_FAILED",
    "PRODUCT_PURCHASE_CREATED",
    "PRODUCT_PURCHASE_IN_PROGRESS",
    "PRODUCT_PURCHASE_FAILED",
    "PRODUCT_PURCHASE_VERIFICATION_CREATED",
    "PRODUCT_PURCHASE_VERIFICATION_IN_PROGRESS",
    "PRODUCT_PURCHASE_VERIFICATION_FAILED",
    "TENANT_PRINCIPAL_APP_CREATED",
    "TENANT_PRINCIPAL_APP_IN_PROGRESS",
    "TENANT_PRINCIPAL_APP_FAILED",
    "TENANT_PRINCIPAL_CREATED",
    "TENANT_PRINCIPAL_IN_PROGRESS",
    "TENANT_PRINCIPAL_FAILED",
    "TENANT_PRINCIPAL_CREDENTIAL_CREATED",
    "TENANT_PRINCIPAL_CREDENTIAL_IN_PROGRESS",
    "TENANT_PRINCIPAL_CREDENTIAL_FAILED",
    "ADMIN_ROLE_DEFINITION_CREATED",
    "ADMIN_ROLE_DEFINITION_IN_PROGRESS",
    "ADMIN_ROLE_DEFINITION_FAILED",
    "PRINCIPAL_ADMIN_ROLE_CREATED",
    "PRINCIPAL_ADMIN_ROLE_IN_PROGRESS",
    "PRINCIPAL_ADMIN_ROLE_FAILED",
    "INITIAL_MGMT_GROUP_CREATED",
    "INITIAL_MGMT_GROUP_IN_PROGRESS",
    "INITIAL_MGMT_GROUP_FAILED",
    "INITIAL_MGMT_GROUP_VERIFICATION_CREATED",
    "INITIAL_MGMT_GROUP_VERIFICATION_IN_PROGRESS",
    "INITIAL_MGMT_GROUP_VERIFICATION_FAILED",
    "TENANT_ADMIN_OWNERSHIP_CREATED",
    "TENANT_ADMIN_OWNERSHIP_IN_PROGRESS",
    "TENANT_ADMIN_OWNERSHIP_FAILED",
    "TENANT_PRINCIPAL_OWNERSHIP_CREATED",
    "TENANT_PRINCIPAL_OWNERSHIP_IN_PROGRESS",
    "TENANT_PRINCIPAL_OWNERSHIP_FAILED",
)

# States that exist BEFORE this revision: exactly the new ones minus the
# stages this migration introduces, in the same relative order.
OLD_STATES = tuple(
    state
    for state in NEW_STATES
    if not state.startswith(("PRODUCT_PURCHASE", "INITIAL_MGMT_GROUP"))
)


def _fsm_enum(states):
    """Build the non-native (VARCHAR-backed) enum type for the state column."""
    return sa.Enum(*states, name="fsmstates", native_enum=False)


def upgrade():
    """Widen portfolio_state_machines.state to include the new FSM stages."""
    op.alter_column(
        "portfolio_state_machines",
        "state",
        type_=_fsm_enum(NEW_STATES),
        existing_type=_fsm_enum(OLD_STATES),
        existing_nullable=False,
    )


def downgrade():
    """Revert portfolio_state_machines.state to the previous stage set."""
    op.alter_column(
        "portfolio_state_machines",
        "state",
        type_=_fsm_enum(OLD_STATES),
        existing_type=_fsm_enum(NEW_STATES),
        existing_nullable=False,
    )

View File

@ -157,7 +157,6 @@ def map_config(config):
**config["default"],
"USE_AUDIT_LOG": config["default"].getboolean("USE_AUDIT_LOG"),
"ENV": config["default"]["ENVIRONMENT"],
"BROKER_URL": config["default"]["REDIS_URI"],
"DEBUG": config["default"].getboolean("DEBUG"),
"DEBUG_MAILER": config["default"].getboolean("DEBUG_MAILER"),
"SQLALCHEMY_ECHO": config["default"].getboolean("SQLALCHEMY_ECHO"),
@ -233,13 +232,34 @@ def make_config(direct_config=None):
config.set("default", "DATABASE_URI", database_uri)
# Assemble REDIS_URI value
redis_use_tls = config["default"].getboolean("REDIS_TLS")
redis_uri = "redis{}://{}:{}@{}".format( # pragma: allowlist secret
("s" if config["default"].getboolean("REDIS_TLS") else ""),
("s" if redis_use_tls else ""),
(config.get("default", "REDIS_USER") or ""),
(config.get("default", "REDIS_PASSWORD") or ""),
config.get("default", "REDIS_HOST"),
)
celery_uri = redis_uri
if redis_use_tls:
tls_mode = config.get("default", "REDIS_SSLMODE")
tls_mode_str = tls_mode.lower() if tls_mode else "none"
redis_uri = f"{redis_uri}/?ssl_cert_reqs={tls_mode_str}"
# TODO: Kombu, one of Celery's dependencies, still requires
# that ssl_cert_reqs be passed as the string version of an
# option on the ssl module. We can clean this up and use
# the REDIS_URI for both when this PR to Kombu is released:
# https://github.com/celery/kombu/pull/1139
kombu_modes = {
"none": "CERT_NONE",
"required": "CERT_REQUIRED",
"optional": "CERT_OPTIONAL",
}
celery_tls_mode_str = kombu_modes[tls_mode_str]
celery_uri = f"{celery_uri}/?ssl_cert_reqs={celery_tls_mode_str}"
config.set("default", "REDIS_URI", redis_uri)
config.set("default", "BROKER_URL", celery_uri)
return map_config(config)

View File

@ -1,6 +1,5 @@
import json
from secrets import token_urlsafe
from typing import Any, Dict
from uuid import uuid4
from atst.utils import sha256_hex
@ -25,6 +24,10 @@ from .models import (
BillingProfileVerificationCSPPayload,
BillingProfileVerificationCSPResult,
CostManagementQueryCSPResult,
InitialMgmtGroupCSPPayload,
InitialMgmtGroupCSPResult,
InitialMgmtGroupVerificationCSPPayload,
InitialMgmtGroupVerificationCSPResult,
EnvironmentCSPPayload,
EnvironmentCSPResult,
KeyVaultCredentials,
@ -57,6 +60,8 @@ from .models import (
TenantPrincipalOwnershipCSPResult,
UserCSPPayload,
UserCSPResult,
UserRoleCSPPayload,
UserRoleCSPResult,
)
from .policy import AzurePolicyManager
@ -103,10 +108,14 @@ class AzureCloudProvider(CloudProviderInterface):
self.secret_key = config["AZURE_SECRET_KEY"]
self.tenant_id = config["AZURE_TENANT_ID"]
self.vault_url = config["AZURE_VAULT_URL"]
self.ps_client_id = config["POWERSHELL_CLIENT_ID"]
self.owner_role_def_id = config["AZURE_OWNER_ROLE_DEF_ID"]
self.ps_client_id = config["AZURE_POWERSHELL_CLIENT_ID"]
self.graph_resource = config["AZURE_GRAPH_RESOURCE"]
self.default_aadp_qty = config["AZURE_AADP_QTY"]
self.roles = {
"owner": config["AZURE_ROLE_DEF_ID_OWNER"],
"contributor": config["AZURE_ROLE_DEF_ID_CONTRIBUTOR"],
"billing": config["AZURE_ROLE_DEF_ID_BILLING_READER"],
}
if azure_sdk_provider is None:
self.sdk = AzureSDKProvider()
@ -189,6 +198,38 @@ class AzureCloudProvider(CloudProviderInterface):
return ApplicationCSPResult(**response)
def create_initial_mgmt_group(self, payload: InitialMgmtGroupCSPPayload):
    """Create the initial management group for a newly provisioned tenant.

    Authenticates with the root service principal's credentials scoped to
    the Resource Manager endpoint, then delegates the actual creation to
    ``_create_management_group``.
    """
    root_creds = self._source_creds(payload.tenant_id)
    scoped_creds = self._get_credential_obj(
        {
            "client_id": root_creds.root_sp_client_id,
            "secret_key": root_creds.root_sp_key,
            "tenant_id": root_creds.root_tenant_id,
        },
        resource=self.sdk.cloud.endpoints.resource_manager,
    )
    result = self._create_management_group(
        scoped_creds, payload.management_group_name, payload.display_name
    )
    return InitialMgmtGroupCSPResult(**result)
def create_initial_mgmt_group_verification(
    self, payload: InitialMgmtGroupVerificationCSPPayload
):
    """Verify the initial management group exists by fetching it.

    NOTE(review): the tenant id is used as the management-group id here;
    elsewhere in this module the tenant id is described as the id of the
    root management group once created — confirm that is intended.
    """
    root_creds = self._source_creds(payload.tenant_id)
    scoped_creds = self._get_credential_obj(
        {
            "client_id": root_creds.root_sp_client_id,
            "secret_key": root_creds.root_sp_key,
            "tenant_id": root_creds.root_tenant_id,
        },
        resource=self.sdk.cloud.endpoints.resource_manager,
    )
    lookup = self._get_management_group(scoped_creds, payload.tenant_id)
    return InitialMgmtGroupVerificationCSPResult(**lookup.result())
def _create_management_group(
self, credentials, management_group_id, display_name, parent_id=None,
):
@ -215,6 +256,11 @@ class AzureCloudProvider(CloudProviderInterface):
# instead?
return create_request.result()
def _get_management_group(self, credentials, management_group_id):
    """Fetch a single management group via the Management Groups SDK client."""
    groups_client = self.sdk.managementgroups.ManagementGroupsAPI(credentials)
    return groups_client.management_groups.get(management_group_id)
def _create_policy_definition(
self, credentials, subscription_id, management_group_id, properties,
):
@ -580,7 +626,7 @@ class AzureCloudProvider(CloudProviderInterface):
def create_tenant_admin_ownership(self, payload: TenantAdminOwnershipCSPPayload):
mgmt_token = self._get_elevated_management_token(payload.tenant_id)
role_definition_id = f"/providers/Microsoft.Management/managementGroups/{payload.tenant_id}/providers/Microsoft.Authorization/roleDefinitions/{self.owner_role_def_id}"
role_definition_id = f"/providers/Microsoft.Management/managementGroups/{payload.tenant_id}/providers/Microsoft.Authorization/roleDefinitions/{self.roles['owner']}"
request_body = {
"properties": {
@ -608,7 +654,7 @@ class AzureCloudProvider(CloudProviderInterface):
mgmt_token = self._get_elevated_management_token(payload.tenant_id)
# NOTE: the tenant_id is also the id of the root management group, once it is created
role_definition_id = f"/providers/Microsoft.Management/managementGroups/{payload.tenant_id}/providers/Microsoft.Authorization/roleDefinitions/{self.owner_role_def_id}"
role_definition_id = f"/providers/Microsoft.Management/managementGroups/{payload.tenant_id}/providers/Microsoft.Authorization/roleDefinitions/{self.roles['owner']}"
request_body = {
"properties": {
@ -895,6 +941,40 @@ class AzureCloudProvider(CloudProviderInterface):
f"Failed update user email: {response.json()}"
)
def create_user_role(self, payload: UserRoleCSPPayload):
    """Assign a role to a user at management-group scope.

    Resolves the role definition GUID from ``self.roles`` by the payload's
    role name, then PUTs a role assignment under a freshly generated GUID.

    Raises:
        AuthenticationException: if no token can be obtained for the tenant.
        UserProvisioningException: if the assignment request is rejected.
    """
    token = self._get_tenant_principal_token(payload.tenant_id)
    if token is None:
        raise AuthenticationException(
            "Could not resolve graph token for tenant admin"
        )

    definition_guid = self.roles[payload.role]
    definition_id = f"/providers/Microsoft.Management/managementGroups/{payload.management_group_id}/providers/Microsoft.Authorization/roleDefinitions/{definition_guid}"
    body = {
        "properties": {
            "roleDefinitionId": definition_id,
            "principalId": payload.user_object_id,
        }
    }
    headers = {
        "Authorization": f"Bearer {token}",
    }

    # Role assignments are created under a new, client-generated GUID.
    assignment_guid = str(uuid4())
    url = f"{self.sdk.cloud.endpoints.resource_manager}/providers/Microsoft.Management/managementGroups/{payload.management_group_id}/providers/Microsoft.Authorization/roleAssignments/{assignment_guid}?api-version=2015-07-01"

    response = self.sdk.requests.put(url, headers=headers, json=body)
    if not response.ok:
        raise UserProvisioningException(
            f"Failed to create user role assignment: {response.json()}"
        )
    return UserRoleCSPResult(**response.json())
def _extract_subscription_id(self, subscription_url):
sub_id_match = SUBSCRIPTION_ID_REGEX.match(subscription_url)
@ -1026,12 +1106,10 @@ class AzureCloudProvider(CloudProviderInterface):
def update_tenant_creds(self, tenant_id, secret: KeyVaultCredentials):
hashed = sha256_hex(tenant_id)
new_secrets = secret.dict()
curr_secrets = self._source_tenant_creds(tenant_id)
updated_secrets: Dict[str, Any] = {**curr_secrets.dict(), **new_secrets}
us = KeyVaultCredentials(**updated_secrets)
self.set_secret(hashed, json.dumps(us.dict()))
return us
updated_secrets = curr_secrets.merge_credentials(secret)
self.set_secret(hashed, json.dumps(updated_secrets.dict()))
return updated_secrets
def _source_tenant_creds(self, tenant_id) -> KeyVaultCredentials:
hashed = sha256_hex(tenant_id)
@ -1060,7 +1138,7 @@ class AzureCloudProvider(CloudProviderInterface):
"timeframe": "Custom",
"timePeriod": {"from": payload.from_date, "to": payload.to_date,},
"dataset": {
"granularity": "Daily",
"granularity": "Monthly",
"aggregation": {"totalCost": {"name": "PreTaxCost", "function": "Sum"}},
"grouping": [{"type": "Dimension", "name": "InvoiceId"}],
},

View File

@ -1,7 +1,7 @@
from typing import Dict
class CloudProviderInterface:
class CloudProviderInterface: # pragma: no cover
def set_secret(self, secret_key: str, secret_value: str):
raise NotImplementedError()

View File

@ -1,4 +1,5 @@
from uuid import uuid4
import pendulum
from .cloud_provider_interface import CloudProviderInterface
from .exceptions import (
@ -23,6 +24,10 @@ from .models import (
BillingProfileTenantAccessCSPResult,
BillingProfileVerificationCSPPayload,
BillingProfileVerificationCSPResult,
InitialMgmtGroupCSPPayload,
InitialMgmtGroupCSPResult,
InitialMgmtGroupVerificationCSPPayload,
InitialMgmtGroupVerificationCSPResult,
CostManagementQueryCSPResult,
CostManagementQueryProperties,
ProductPurchaseCSPPayload,
@ -280,6 +285,29 @@ class MockCloudProvider(CloudProviderInterface):
}
)
def create_initial_mgmt_group(self, payload: InitialMgmtGroupCSPPayload):
    """Mock creation of a tenant's initial management group."""
    # Simulate the same network / server / auth failure modes as the other
    # mock CSP calls.
    self._maybe_raise(self.NETWORK_FAILURE_PCT, self.NETWORK_EXCEPTION)
    self._maybe_raise(self.SERVER_FAILURE_PCT, self.SERVER_EXCEPTION)
    self._maybe_raise(self.UNAUTHORIZED_RATE, self.AUTHORIZATION_EXCEPTION)
    group_id = f"{AZURE_MGMNT_PATH}{payload.management_group_name}"
    return InitialMgmtGroupCSPResult(id=group_id)
def create_initial_mgmt_group_verification(
    self, payload: InitialMgmtGroupVerificationCSPPayload
):
    """Mock verification of the initial management group.

    Returns a fixed placeholder id after simulating the usual mock failure
    modes. (The original wrapped the kwargs in ``**dict(...)`` and carried
    commented-out code; the result is constructed directly instead.)
    """
    self._maybe_raise(self.NETWORK_FAILURE_PCT, self.NETWORK_EXCEPTION)
    self._maybe_raise(self.SERVER_FAILURE_PCT, self.SERVER_EXCEPTION)
    self._maybe_raise(self.UNAUTHORIZED_RATE, self.AUTHORIZATION_EXCEPTION)
    return InitialMgmtGroupVerificationCSPResult(id="Test Id")
def create_product_purchase(self, payload: ProductPurchaseCSPPayload):
self._maybe_raise(self.NETWORK_FAILURE_PCT, self.NETWORK_EXCEPTION)
self._maybe_raise(self.SERVER_FAILURE_PCT, self.SERVER_EXCEPTION)
@ -459,15 +487,26 @@ class MockCloudProvider(CloudProviderInterface):
self._maybe_raise(self.UNAUTHORIZED_RATE, self.AUTHORIZATION_EXCEPTION)
object_id = str(uuid4())
start_of_month = pendulum.today(tz="utc").start_of("month").replace(tzinfo=None)
this_month = start_of_month.to_atom_string()
last_month = start_of_month.subtract(months=1).to_atom_string()
two_months_ago = start_of_month.subtract(months=2).to_atom_string()
properties = CostManagementQueryProperties(
**dict(
columns=[
{"name": "PreTaxCost", "type": "Number"},
{"name": "UsageDate", "type": "Number"},
{"name": "BillingMonth", "type": "Datetime"},
{"name": "InvoiceId", "type": "String"},
{"name": "Currency", "type": "String"},
],
rows=[],
rows=[
[1.0, two_months_ago, "", "USD"],
[500.0, two_months_ago, "e05009w9sf", "USD"],
[50.0, last_month, "", "USD"],
[1000.0, last_month, "e0500a4qhw", "USD"],
[500.0, this_month, "", "USD"],
],
)
)

View File

@ -1,3 +1,4 @@
from enum import Enum
from secrets import token_urlsafe
from typing import Dict, List, Optional
from uuid import uuid4
@ -320,7 +321,7 @@ class ManagementGroupCSPPayload(AliasModel):
tenant_id: str
management_group_name: Optional[str]
display_name: str
parent_id: str
parent_id: Optional[str]
@validator("management_group_name", pre=True, always=True)
def supply_management_group_name_default(cls, name):
@ -340,16 +341,25 @@ class ManagementGroupCSPPayload(AliasModel):
@validator("parent_id", pre=True, always=True)
def enforce_parent_id_pattern(cls, id_):
if AZURE_MGMNT_PATH not in id_:
return f"{AZURE_MGMNT_PATH}{id_}"
else:
return id_
if id_:
if AZURE_MGMNT_PATH not in id_:
return f"{AZURE_MGMNT_PATH}{id_}"
else:
return id_
class ManagementGroupCSPResponse(AliasModel):
id: str
class ManagementGroupGetCSPPayload(BaseCSPPayload):
management_group_name: str
class ManagementGroupGetCSPResponse(AliasModel):
id: str
class ApplicationCSPPayload(ManagementGroupCSPPayload):
pass
@ -358,6 +368,22 @@ class ApplicationCSPResult(ManagementGroupCSPResponse):
pass
class InitialMgmtGroupCSPPayload(ManagementGroupCSPPayload):
    """Payload for creating a tenant's initial management group."""

    pass
class InitialMgmtGroupCSPResult(ManagementGroupCSPResponse):
    """Result of creating a tenant's initial management group."""

    pass
class InitialMgmtGroupVerificationCSPPayload(ManagementGroupGetCSPPayload):
    """Payload for verifying that the initial management group exists."""

    pass
class InitialMgmtGroupVerificationCSPResult(ManagementGroupGetCSPResponse):
    """Result of the initial-management-group verification lookup."""

    pass
class EnvironmentCSPPayload(ManagementGroupCSPPayload):
pass
@ -417,6 +443,15 @@ class KeyVaultCredentials(BaseModel):
return values
def merge_credentials(
    self, new_creds: "KeyVaultCredentials"
) -> "KeyVaultCredentials":
    """Return a new credential set overlaying *new_creds* onto this one.

    Only truthy fields of ``new_creds`` override; empty/None fields keep
    this object's existing values.
    """
    merged = self.dict()
    merged.update(
        {field: value for field, value in new_creds.dict().items() if value}
    )
    return KeyVaultCredentials(**merged)
class SubscriptionCreationCSPPayload(BaseCSPPayload):
display_name: str
@ -509,6 +544,21 @@ class UserCSPResult(AliasModel):
id: str
class UserRoleCSPPayload(BaseCSPPayload):
    """Payload for assigning a CSP role to a user within a management group."""

    class Roles(str, Enum):
        # Allowed role names; the cloud provider maps these to role
        # definition ids from its configuration.
        owner = "owner"
        contributor = "contributor"
        billing = "billing"

    # Scope of the assignment (a management group), the role to grant, and
    # the target user's object id.
    management_group_id: str
    role: Roles
    user_object_id: str
class UserRoleCSPResult(AliasModel):
    """Result of a user role assignment; ``id`` comes from the CSP response."""

    id: str
class QueryColumn(AliasModel):
name: str
type: str

View File

@ -1,6 +1,6 @@
from collections import defaultdict
import json
from decimal import Decimal
import pendulum
def load_fixture_data():
@ -11,128 +11,25 @@ def load_fixture_data():
class MockReportingProvider:
FIXTURE_SPEND_DATA = load_fixture_data()
@classmethod
def get_portfolio_monthly_spending(cls, portfolio):
"""
returns an array of application and environment spending for the
portfolio. Applications and their nested environments are sorted in
alphabetical order by name.
[
{
name
this_month
last_month
total
environments [
{
name
this_month
last_month
total
}
]
}
]
"""
fixture_apps = cls.FIXTURE_SPEND_DATA.get(portfolio.name, {}).get(
"applications", []
)
def prepare_azure_reporting_data(rows: list):
"""
Returns a dict representing invoiced and estimated funds for a portfolio given
a list of rows from CostManagementQueryCSPResult.properties.rows
{
invoiced: Decimal,
estimated: Decimal
}
"""
for application in portfolio.applications:
if application.name not in [app["name"] for app in fixture_apps]:
fixture_apps.append({"name": application.name, "environments": []})
estimated = []
while rows:
if pendulum.parse(rows[-1][1]) >= pendulum.now(tz="utc").start_of("month"):
estimated.append(rows.pop())
else:
break
return sorted(
[
cls._get_application_monthly_totals(portfolio, fixture_app)
for fixture_app in fixture_apps
if fixture_app["name"]
in [application.name for application in portfolio.applications]
],
key=lambda app: app["name"],
)
@classmethod
def _get_environment_monthly_totals(cls, environment):
"""
returns a dictionary that represents spending totals for an environment e.g.
{
name
this_month
last_month
total
}
"""
return {
"name": environment["name"],
"this_month": sum(environment["spending"]["this_month"].values()),
"last_month": sum(environment["spending"]["last_month"].values()),
"total": sum(environment["spending"]["total"].values()),
}
@classmethod
def _get_application_monthly_totals(cls, portfolio, fixture_app):
"""
returns a dictionary that represents spending totals for an application
and its environments e.g.
{
name
this_month
last_month
total
environments: [
{
name
this_month
last_month
total
}
]
}
"""
application_envs = [
env
for env in portfolio.all_environments
if env.application.name == fixture_app["name"]
]
environments = [
cls._get_environment_monthly_totals(env)
for env in fixture_app["environments"]
if env["name"] in [e.name for e in application_envs]
]
for env in application_envs:
if env.name not in [env["name"] for env in environments]:
environments.append({"name": env.name})
return {
"name": fixture_app["name"],
"this_month": sum(env.get("this_month", 0) for env in environments),
"last_month": sum(env.get("last_month", 0) for env in environments),
"total": sum(env.get("total", 0) for env in environments),
"environments": sorted(environments, key=lambda env: env["name"]),
}
@classmethod
def get_spending_by_JEDI_clin(cls, portfolio):
"""
returns an dictionary of spending per JEDI CLIN for a portfolio
{
jedi_clin: {
invoiced
estimated
},
}
"""
if portfolio.name in cls.FIXTURE_SPEND_DATA:
CLIN_spend_dict = defaultdict(lambda: defaultdict(Decimal))
for application in cls.FIXTURE_SPEND_DATA[portfolio.name]["applications"]:
for environment in application["environments"]:
for clin, spend in environment["spending"]["this_month"].items():
CLIN_spend_dict[clin]["estimated"] += Decimal(spend)
for clin, spend in environment["spending"]["total"].items():
CLIN_spend_dict[clin]["invoiced"] += Decimal(spend)
return CLIN_spend_dict
return {}
return dict(
invoiced=Decimal(sum([row[0] for row in rows])),
estimated=Decimal(sum([row[0] for row in estimated])),
)

View File

@ -90,14 +90,18 @@ class EnvironmentRoles(object):
)
@classmethod
def get_environment_roles_pending_creation(cls) -> List[UUID]:
def get_pending_creation(cls) -> List[UUID]:
results = (
db.session.query(EnvironmentRole.id)
.join(Environment)
.join(ApplicationRole)
.filter(Environment.deleted == False)
.filter(EnvironmentRole.status == EnvironmentRole.Status.PENDING)
.filter(ApplicationRole.status == ApplicationRoleStatus.ACTIVE)
.filter(EnvironmentRole.deleted == False)
.filter(ApplicationRole.deleted == False)
.filter(ApplicationRole.cloud_id != None)
.filter(ApplicationRole.status != ApplicationRoleStatus.DISABLED)
.filter(EnvironmentRole.status != EnvironmentRole.Status.DISABLED)
.filter(EnvironmentRole.cloud_id.is_(None))
.all()
)
return [id_ for id_, in results]
@ -106,7 +110,7 @@ class EnvironmentRoles(object):
def disable(cls, environment_role_id):
environment_role = EnvironmentRoles.get_by_id(environment_role_id)
if environment_role.csp_user_id and not environment_role.environment.cloud_id:
if environment_role.cloud_id and not environment_role.environment.cloud_id:
tenant_id = environment_role.environment.application.portfolio.csp_data.get(
"tenant_id"
)

View File

@ -1,12 +1,13 @@
from flask import current_app
from itertools import groupby
from atst.domain.csp.cloud.models import (
ReportingCSPPayload,
CostManagementQueryCSPResult,
)
from atst.domain.csp.reports import prepare_azure_reporting_data
import pendulum
class Reports:
@classmethod
def monthly_spending(cls, portfolio):
return current_app.csp.reports.get_portfolio_monthly_spending(portfolio)
@classmethod
def expired_task_orders(cls, portfolio):
return [
@ -14,31 +15,19 @@ class Reports:
]
@classmethod
def obligated_funds_by_JEDI_clin(cls, portfolio):
clin_spending = current_app.csp.reports.get_spending_by_JEDI_clin(portfolio)
active_clins = portfolio.active_clins
for jedi_clin, clins in groupby(
active_clins, key=lambda clin: clin.jedi_clin_type
):
if not clin_spending.get(jedi_clin.name):
clin_spending[jedi_clin.name] = {}
clin_spending[jedi_clin.name]["obligated"] = sum(
clin.obligated_amount for clin in clins
)
def get_portfolio_spending(cls, portfolio):
# TODO: Extend this function to make from_date and to_date configurable
from_date = pendulum.now().subtract(years=1).add(days=1).format("YYYY-MM-DD")
to_date = pendulum.now().format("YYYY-MM-DD")
rows = []
output = []
for clin in clin_spending.keys():
invoiced = clin_spending[clin].get("invoiced", 0)
estimated = clin_spending[clin].get("estimated", 0)
obligated = clin_spending[clin].get("obligated", 0)
remaining = obligated - (invoiced + estimated)
output.append(
{
"name": clin,
"invoiced": invoiced,
"estimated": estimated,
"obligated": obligated,
"remaining": remaining,
}
if portfolio.csp_data:
payload = ReportingCSPPayload(
from_date=from_date, to_date=to_date, **portfolio.csp_data
)
return output
response: CostManagementQueryCSPResult = current_app.csp.cloud.get_reporting_data(
payload
)
rows = response.properties.rows
return prepare_azure_reporting_data(rows)

View File

@ -5,7 +5,7 @@ from flask import render_template
from jinja2 import contextfilter
from jinja2.exceptions import TemplateNotFound
from urllib.parse import urlparse, urlunparse, parse_qs, urlencode
from decimal import DivisionByZero as DivisionByZeroException
from decimal import DivisionByZero as DivisionByZeroException, InvalidOperation
def iconSvg(name):
@ -43,7 +43,7 @@ def obligatedFundingGraphWidth(values):
numerator, denominator = values
try:
return (numerator / denominator) * 100
except DivisionByZeroException:
except (DivisionByZeroException, InvalidOperation):
return 0

View File

@ -12,10 +12,12 @@ from atst.domain.csp.cloud.models import (
ApplicationCSPPayload,
EnvironmentCSPPayload,
UserCSPPayload,
UserRoleCSPPayload,
)
from atst.domain.environments import Environments
from atst.domain.environment_roles import EnvironmentRoles
from atst.domain.portfolios import Portfolios
from atst.models import JobFailure
from atst.models import CSPRole, JobFailure
from atst.domain.task_orders import TaskOrders
from atst.models.utils import claim_for_update, claim_many_for_update
from atst.queue import celery
@ -124,13 +126,46 @@ def do_create_environment(csp: CloudProviderInterface, environment_id=None):
payload = EnvironmentCSPPayload(
tenant_id=tenant_id, display_name=environment.name, parent_id=parent_id
)
env_result = csp.create_environment(payload)
environment.cloud_id = env_result.id
db.session.add(environment)
db.session.commit()
def do_create_environment_role(csp: CloudProviderInterface, environment_role_id=None):
    """Provision the CSP role assignment backing one EnvironmentRole.

    No-op when the role already has a ``cloud_id``. On success, stores the
    CSP assignment id on the role and commits.
    """
    env_role = EnvironmentRoles.get_by_id(environment_role_id)

    with claim_for_update(env_role) as env_role:
        if env_role.cloud_id is not None:
            # Already provisioned — nothing to do.
            return

        environment = env_role.environment
        csp_details = environment.application.portfolio.csp_data
        app_role = env_role.application_role

        # Translate the application-level role into the CSP payload role.
        role = None
        for csp_role, payload_role in (
            (CSPRole.ADMIN, UserRoleCSPPayload.Roles.owner),
            (CSPRole.BILLING_READ, UserRoleCSPPayload.Roles.billing),
            (CSPRole.CONTRIBUTOR, UserRoleCSPPayload.Roles.contributor),
        ):
            if env_role.role == csp_role:
                role = payload_role
                break

        payload = UserRoleCSPPayload(
            tenant_id=csp_details.get("tenant_id"),
            management_group_id=environment.cloud_id,
            user_object_id=app_role.cloud_id,
            role=role,
        )
        result = csp.create_user_role(payload)

        env_role.cloud_id = result.id
        db.session.add(env_role)
        db.session.commit()

        # TODO: should send notification email to the user, maybe with their portal login name
def render_email(template_path, context):
    """Render the Jinja email template at *template_path* with *context*."""
    template = app.jinja_env.get_template(template_path)
    return template.render(context)
@ -165,6 +200,16 @@ def create_user(self, application_role_ids=None):
)
@celery.task(bind=True, base=RecordFailure)
def create_environment_role(self, environment_role_id=None):
    """Celery task: provision a single environment role in the CSP."""
    do_work(do_create_environment_role, self, app.csp.cloud, environment_role_id=environment_role_id)
@celery.task(bind=True, base=RecordFailure)
def create_environment(self, environment_id=None):
    """Celery task: provision a single environment in the CSP."""
    do_work(
        do_create_environment,
        self,
        app.csp.cloud,
        environment_id=environment_id,
    )
@ -191,6 +236,12 @@ def dispatch_create_user(self):
create_user.delay(application_role_ids=application_role_ids)
@celery.task(bind=True)
def dispatch_create_environment_role(self):
    """Fan out one create_environment_role task per pending environment role."""
    pending_role_ids = EnvironmentRoles.get_pending_creation()
    for pending_role_id in pending_role_ids:
        create_environment_role.delay(environment_role_id=pending_role_id)
@celery.task(bind=True)
def dispatch_create_environment(self):
for environment_id in Environments.get_environments_pending_creation(

View File

@ -36,7 +36,7 @@ class EnvironmentRole(
)
application_role = relationship("ApplicationRole")
csp_user_id = Column(String())
cloud_id = Column(String())
class Status(Enum):
PENDING = "pending"

View File

@ -24,6 +24,8 @@ class AzureStages(Enum):
TENANT_PRINCIPAL_CREDENTIAL = "tenant principal credential"
ADMIN_ROLE_DEFINITION = "admin role definition"
PRINCIPAL_ADMIN_ROLE = "tenant principal admin"
INITIAL_MGMT_GROUP = "initial management group"
INITIAL_MGMT_GROUP_VERIFICATION = "initial management group verification"
TENANT_ADMIN_OWNERSHIP = "tenant admin ownership"
TENANT_PRINCIPAL_OWNERSHIP = "tenant principial ownership"

View File

@ -94,6 +94,12 @@ class Portfolio(
def active_task_orders(self):
return [task_order for task_order in self.task_orders if task_order.is_active]
@property
def total_obligated_funds(self):
    """Sum of obligated funds across this portfolio's active task orders."""
    total = 0
    for task_order in self.active_task_orders:
        total += task_order.total_obligated_funds
    return total
@property
def funding_duration(self):
"""

View File

@ -157,37 +157,38 @@ class PortfolioStateMachine(
print(exc.json())
app.logger.info(payload)
self.fail_stage(stage)
else:
# TODO: Determine best place to do this, maybe @reconstructor
self.csp = app.csp.cloud
# TODO: Determine best place to do this, maybe @reconstructor
self.csp = app.csp.cloud
try:
func_name = f"create_{stage}"
response = getattr(self.csp, func_name)(payload_data)
if self.portfolio.csp_data is None:
self.portfolio.csp_data = {}
self.portfolio.csp_data.update(response.dict())
db.session.add(self.portfolio)
db.session.commit()
except PydanticValidationError as exc:
app.logger.error(
f"Failed to cast response to valid result class {self.__repr__()}:",
exc_info=1,
)
app.logger.info(exc.json())
print(exc.json())
app.logger.info(payload_data)
# TODO: Ensure that failing the stage does not preclude a Celery retry
self.fail_stage(stage)
# TODO: catch and handle general CSP exception here
except (ConnectionException, UnknownServerException) as exc:
app.logger.error(
f"CSP api call. Caught exception for {self.__repr__()}.",
exc_info=1,
)
# TODO: Ensure that failing the stage does not preclude a Celery retry
self.fail_stage(stage)
try:
func_name = f"create_{stage}"
response = getattr(self.csp, func_name)(payload_data)
if self.portfolio.csp_data is None:
self.portfolio.csp_data = {}
self.portfolio.csp_data.update(response.dict())
db.session.add(self.portfolio)
db.session.commit()
except PydanticValidationError as exc:
app.logger.error(
f"Failed to cast response to valid result class {self.__repr__()}:",
exc_info=1,
)
app.logger.info(exc.json())
print(exc.json())
app.logger.info(payload_data)
# TODO: Ensure that failing the stage does not preclude a Celery retry
self.fail_stage(stage)
# TODO: catch and handle general CSP exception here
except (ConnectionException, UnknownServerException) as exc:
app.logger.error(
f"CSP api call. Caught exception for {self.__repr__()}.", exc_info=1,
)
# TODO: Ensure that failing the stage does not preclude a Celery retry
self.fail_stage(stage)
self.finish_stage(stage)
self.finish_stage(stage)
def is_csp_data_valid(self, event):
"""

View File

@ -25,7 +25,6 @@ SORT_ORDERING = [
Status.DRAFT,
Status.UPCOMING,
Status.EXPIRED,
Status.UNSIGNED,
]
@ -148,7 +147,10 @@ class TaskOrder(Base, mixins.TimestampsMixin):
@property
def display_status(self):
return self.status.value
if self.status == Status.UNSIGNED:
return Status.DRAFT.value
else:
return self.status.value
@property
def portfolio_name(self):

View File

@ -23,6 +23,10 @@ def update_celery(celery, app):
"task": "atst.jobs.dispatch_create_user",
"schedule": 60,
},
"beat-dispatch_create_environment_role": {
"task": "atst.jobs.dispatch_create_environment_role",
"schedule": 60,
},
}
class ContextTask(celery.Task):

View File

@ -34,25 +34,25 @@ def create_portfolio():
@user_can(Permissions.VIEW_PORTFOLIO_REPORTS, message="view portfolio reports")
def reports(portfolio_id):
portfolio = Portfolios.get(g.current_user, portfolio_id)
spending = Reports.get_portfolio_spending(portfolio)
obligated = portfolio.total_obligated_funds
remaining = obligated - (spending["invoiced"] + spending["estimated"])
current_obligated_funds = Reports.obligated_funds_by_JEDI_clin(portfolio)
current_obligated_funds = {
**spending,
"obligated": obligated,
"remaining": remaining,
}
if any(map(lambda clin: clin["remaining"] < 0, current_obligated_funds)):
if current_obligated_funds["remaining"] < 0:
flash("insufficient_funds")
# wrapped in str() because the sum of obligated funds returns a Decimal object
total_portfolio_value = str(
sum(
task_order.total_obligated_funds
for task_order in portfolio.active_task_orders
)
)
return render_template(
"portfolios/reports/index.html",
portfolio=portfolio,
total_portfolio_value=total_portfolio_value,
# wrapped in str() because the sum of obligated funds returns a Decimal object
total_portfolio_value=str(portfolio.total_obligated_funds),
current_obligated_funds=current_obligated_funds,
expired_task_orders=Reports.expired_task_orders(portfolio),
monthly_spending=Reports.monthly_spending(portfolio),
retrieved=datetime.now(), # mocked datetime of reporting data retrival
)

View File

@ -1,9 +1,19 @@
[default]
ASSETS_URL
AZURE_AADP_QTY=5
AZURE_ACCOUNT_NAME
AZURE_STORAGE_KEY
AZURE_TO_BUCKET_NAME
AZURE_CLIENT_ID
AZURE_GRAPH_RESOURCE="https://graph.microsoft.com/"
AZURE_POLICY_LOCATION=policies
AZURE_POWERSHELL_CLIENT_ID
AZURE_ROLE_DEF_ID_BILLING_READER="fa23ad8b-c56e-40d8-ac0c-ce449e1d2c64"
AZURE_ROLE_DEF_ID_CONTRIBUTOR="b24988ac-6180-42a0-ab88-20f7382dd24c"
AZURE_ROLE_DEF_ID_OWNER="8e3af657-a8ff-443c-a75c-2fe8c4bcb635"
AZURE_SECRET_KEY
AZURE_STORAGE_KEY
AZURE_TENANT_ID
AZURE_TO_BUCKET_NAME
AZURE_VAULT_URL
BLOB_STORAGE_URL=http://localhost:8000/
CAC_URL = http://localhost:8000/login-redirect
CA_CHAIN = ssl/server-certs/ca-chain.pem
@ -38,14 +48,15 @@ PGUSER = postgres
PORT=8000
REDIS_HOST=localhost:6379
REDIS_PASSWORD
REDIS_SSLMODE
REDIS_TLS=False
REDIS_USER
SECRET_KEY = change_me_into_something_secret
SERVER_NAME
SESSION_COOKIE_NAME=atat
SESSION_COOKIE_DOMAIN
SESSION_KEY_PREFIX=session:
SESSION_COOKIE_NAME=atat
SESSION_COOKIE_SECURE=false
SESSION_KEY_PREFIX=session:
SESSION_TYPE = redis
SESSION_USE_SIGNER = True
SQLALCHEMY_ECHO = False

View File

@ -10,6 +10,5 @@ resources:
- volume-claim.yml
- nginx-client-ca-bundle.yml
- acme-challenges.yml
- aadpodidentity.yml
- nginx-snippets.yml
- autoscaling.yml

View File

@ -4,19 +4,30 @@ kind: ConfigMap
metadata:
name: atst-worker-envvars
data:
AZURE_ACCOUNT_NAME: jeditasksatat
CELERY_DEFAULT_QUEUE: celery-staging
SERVER_NAME: staging.atat.code.mil
FLASK_ENV: staging
PGDATABASE: cloudzero_jedidev_atat
PGHOST: 191.238.6.43
PGUSER: atat@cloudzero-jedidev-sql
PGSSLMODE: require
REDIS_HOST: 10.1.3.34:6380
SERVER_NAME: dev.atat.cloud.mil
---
apiVersion: v1
kind: ConfigMap
metadata:
name: atst-envvars
data:
ASSETS_URL: https://atat-cdn-staging.azureedge.net/
CDN_ORIGIN: https://staging.atat.code.mil
ASSETS_URL: ""
AZURE_ACCOUNT_NAME: jeditasksatat
CAC_URL: https://auth-dev.atat.cloud.mil
CDN_ORIGIN: https://dev.atat.cloud.mil
CELERY_DEFAULT_QUEUE: celery-staging
FLASK_ENV: staging
STATIC_URL: https://atat-cdn-staging.azureedge.net/static/
PGHOST: cloudzero-dev-sql.postgres.database.azure.com
REDIS_HOST: cloudzero-dev-redis.redis.cache.windows.net:6380
PGDATABASE: cloudzero_jedidev_atat
PGHOST: 191.238.6.43
PGUSER: atat@cloudzero-jedidev-sql
PGSSLMODE: require
REDIS_HOST: 10.1.3.34:6380
SESSION_COOKIE_DOMAIN: atat.cloud.mil

View File

@ -9,23 +9,19 @@ spec:
- name: nginx-secret
flexVolume:
options:
keyvaultname: "cloudzero-dev-keyvault"
# keyvaultobjectnames: "dhparam4096;cert;cert"
keyvaultobjectnames: "foo"
keyvaultobjectaliases: "FOO"
keyvaultobjecttypes: "secret"
usevmmanagedidentity: "true"
usepodidentity: "false"
usevmmanagedidentity: "true"
vmmanagedidentityclientid: $VMSS_CLIENT_ID
keyvaultname: "cz-jedidev-keyvault"
keyvaultobjectnames: "dhparam4096;ATATCERT;ATATCERT"
- name: flask-secret
flexVolume:
options:
keyvaultname: "cloudzero-dev-keyvault"
# keyvaultobjectnames: "AZURE-STORAGE-KEY;MAIL-PASSWORD;PGPASSWORD;REDIS-PASSWORD;SECRET-KEY"
keyvaultobjectnames: "master-PGPASSWORD"
keyvaultobjectaliases: "PGPASSWORD"
keyvaultobjecttypes: "secret"
usevmmanagedidentity: "true"
usepodidentity: "false"
usevmmanagedidentity: "true"
vmmanagedidentityclientid: $VMSS_CLIENT_ID
keyvaultname: "cz-jedidev-keyvault"
keyvaultobjectnames: "AZURE-STORAGE-KEY;MAIL-PASSWORD;PGPASSWORD;REDIS-PASSWORD;SECRET-KEY"
---
apiVersion: extensions/v1beta1
kind: Deployment
@ -38,10 +34,11 @@ spec:
- name: flask-secret
flexVolume:
options:
keyvaultname: "cloudzero-dev-keyvault"
keyvaultobjectnames: "AZURE-STORAGE-KEY;MAIL-PASSWORD;PGPASSWORD;REDIS-PASSWORD;SECRET-KEY"
usevmmanagedidentity: "true"
usepodidentity: "false"
usevmmanagedidentity: "true"
vmmanagedidentityclientid: $VMSS_CLIENT_ID
keyvaultname: "cz-jedidev-keyvault"
keyvaultobjectnames: "AZURE-STORAGE-KEY;MAIL-PASSWORD;PGPASSWORD;REDIS-PASSWORD;SECRET-KEY"
---
apiVersion: extensions/v1beta1
kind: Deployment
@ -54,10 +51,11 @@ spec:
- name: flask-secret
flexVolume:
options:
keyvaultname: "cloudzero-dev-keyvault"
keyvaultobjectnames: "AZURE-STORAGE-KEY;MAIL-PASSWORD;PGPASSWORD;REDIS-PASSWORD;SECRET-KEY"
usevmmanagedidentity: "true"
usepodidentity: "false"
usevmmanagedidentity: "true"
vmmanagedidentityclientid: $VMSS_CLIENT_ID
keyvaultname: "cz-jedidev-keyvault"
keyvaultobjectnames: "AZURE-STORAGE-KEY;MAIL-PASSWORD;PGPASSWORD;REDIS-PASSWORD;SECRET-KEY"
---
apiVersion: batch/v1beta1
kind: CronJob
@ -72,7 +70,8 @@ spec:
- name: flask-secret
flexVolume:
options:
keyvaultname: "cloudzero-dev-keyvault"
keyvaultobjectnames: "AZURE-STORAGE-KEY;MAIL-PASSWORD;PGPASSWORD;REDIS-PASSWORD;SECRET-KEY"
usevmmanagedidentity: "true"
usepodidentity: "false"
usevmmanagedidentity: "true"
vmmanagedidentityclientid: $VMSS_CLIENT_ID
keyvaultname: "cz-jedidev-keyvault"
keyvaultobjectnames: "AZURE-STORAGE-KEY;MAIL-PASSWORD;PGPASSWORD;REDIS-PASSWORD;SECRET-KEY"

View File

@ -1,9 +1,8 @@
namespace: staging
namespace: cloudzero-dev
bases:
- ../../azure/
resources:
- namespace.yml
- reset-cron-job.yml
patchesStrategicMerge:
- ports.yml
- envvars.yml

View File

@ -1,4 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: staging
name: cloudzero-dev

View File

@ -5,7 +5,7 @@ metadata:
name: atst-main
annotations:
service.beta.kubernetes.io/azure-load-balancer-internal: "true"
service.beta.kubernetes.io/azure-load-balancer-internal-subnet: "cloudzero-dev-public"
service.beta.kubernetes.io/azure-load-balancer-internal-subnet: "cloudzero-jedidev-public"
spec:
loadBalancerIP: ""
ports:
@ -22,7 +22,7 @@ metadata:
name: atst-auth
annotations:
service.beta.kubernetes.io/azure-load-balancer-internal: "true"
service.beta.kubernetes.io/azure-load-balancer-internal-subnet: "cloudzero-dev-public"
service.beta.kubernetes.io/azure-load-balancer-internal-subnet: "cloudzero-jedidev-public"
spec:
loadBalancerIP: ""
ports:

View File

@ -1,46 +0,0 @@
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: reset-db
namespace: atat
spec:
schedule: "0 4 * * *"
concurrencyPolicy: Replace
successfulJobsHistoryLimit: 1
jobTemplate:
spec:
template:
metadata:
labels:
app: atst
role: reset-db
aadpodidbinding: atat-kv-id-binding
spec:
restartPolicy: OnFailure
containers:
- name: reset
image: $CONTAINER_IMAGE
command: [
"/bin/sh", "-c"
]
args: [
"/opt/atat/atst/.venv/bin/python",
"/opt/atat/atst/script/reset_database.py"
]
envFrom:
- configMapRef:
name: atst-worker-envvars
volumeMounts:
- name: flask-secret
mountPath: "/config"
volumes:
- name: flask-secret
flexVolume:
driver: "azure/kv"
options:
usepodidentity: "true"
keyvaultname: "atat-vault-test"
keyvaultobjectnames: "staging-AZURE-STORAGE-KEY;staging-MAIL-PASSWORD;staging-PGPASSWORD;staging-REDIS-PASSWORD;staging-SECRET-KEY"
keyvaultobjectaliases: "AZURE_STORAGE_KEY;MAIL_PASSWORD;PGPASSWORD;REDIS_PASSWORD;SECRET_KEY"
keyvaultobjecttypes: "secret;secret;secret;secret;key"
tenantid: $TENANT_ID

View File

@ -0,0 +1,5 @@
namespace: cloudzero-dev
bases:
- ../../shared/
patchesStrategicMerge:
- migration.yaml

View File

@ -0,0 +1,16 @@
apiVersion: batch/v1
kind: Job
metadata:
name: migration
spec:
template:
spec:
volumes:
- name: flask-secret
flexVolume:
options:
usepodidentity: "false"
usevmmanagedidentity: "true"
vmmanagedidentityclientid: $VMSS_CLIENT_ID
keyvaultname: "cz-jedidev-keyvault"
keyvaultobjectnames: "AZURE-STORAGE-KEY;MAIL-PASSWORD;PGPASSWORD;REDIS-PASSWORD;SECRET-KEY"

View File

@ -0,0 +1,3 @@
namespace: atat
resources:
- migration.yaml

View File

@ -34,8 +34,10 @@ export default {
methods: {
next: function() {
this.submitted = true
if (this.validateFields()) {
this.step += 1
this.submitted = false
}
},
previous: function() {

View File

@ -1,5 +1,6 @@
import ExpandSidenavMixin from '../mixins/expand_sidenav'
import ToggleMixin from '../mixins/toggle'
import { sidenavCookieName } from '../lib/constants'
export default {
name: 'sidenav-toggler',
@ -14,7 +15,7 @@ export default {
toggle: function(e) {
e.preventDefault()
this.isVisible = !this.isVisible
document.cookie = this.cookieName + '=' + this.isVisible + '; path=/'
document.cookie = sidenavCookieName + '=' + this.isVisible + '; path=/'
this.$parent.$emit('sidenavToggle', this.isVisible)
},
},

View File

@ -5,6 +5,13 @@ export default {
mixins: [ToggleMixin],
props: {
defaultVisible: {
type: Boolean,
default: false,
},
},
methods: {
toggle: function(e) {
if (this.$el.contains(e.target)) {

1
js/lib/constants.js Normal file
View File

@ -0,0 +1 @@
// Name of the cookie used to persist the sidenav expanded/collapsed state.
export const sidenavCookieName = 'expandSidenav'

View File

@ -1,11 +1,12 @@
import { sidenavCookieName } from '../lib/constants'
export default {
props: {
cookieName: 'expandSidenav',
defaultVisible: {
type: Boolean,
default: function() {
if (document.cookie.match(this.cookieName)) {
return !!document.cookie.match(this.cookieName + ' *= *true')
if (document.cookie.match(sidenavCookieName)) {
return !!document.cookie.match(sidenavCookieName + ' *= *true')
} else {
return true
}

View File

@ -15,6 +15,7 @@ export default {
return {
changed: this.hasChanges,
valid: false,
submitted: false,
}
},
@ -36,15 +37,16 @@ export default {
handleSubmit: function(event) {
if (!this.valid) {
event.preventDefault()
this.submitted = true
}
},
},
computed: {
canSave: function() {
if (this.changed && this.valid) {
if (this.changed && this.valid && !this.submitted) {
return true
} else if (this.enableSave && this.valid) {
} else if (this.enableSave && this.valid && !this.submitted) {
return true
} else {
return false

View File

@ -16,16 +16,14 @@ from reset_database import reset_database
def database_setup(username, password, dbname, ccpo_users):
print("Applying schema and seeding roles and permissions.")
reset_database()
print(
f"Creating Postgres user role for '{username}' and granting all privileges to database '{dbname}'."
)
try:
_create_database_user(username, password, dbname)
except sqlalchemy.exc.ProgrammingError as err:
print(f"Postgres user role '{username}' already exists.")
_create_database_user(username, password, dbname)
print("Applying schema and seeding roles and permissions.")
reset_database()
print("Creating initial set of CCPO users.")
_add_ccpo_users(ccpo_users)
@ -47,6 +45,22 @@ def _create_database_user(username, password, dbname):
f"ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL PRIVILEGES ON FUNCTIONS TO {username}; \n"
)
try:
# TODO: make this more configurable
engine.execute(f"GRANT {username} TO azure_pg_admin;")
except sqlalchemy.exc.ProgrammingError as err:
print(f"Cannot grant new role {username} to azure_pg_admin")
for table in meta.tables:
engine.execute(f"ALTER TABLE {table} OWNER TO {username};\n")
sequence_results = engine.execute(
"SELECT c.relname FROM pg_class c WHERE c.relkind = 'S';"
).fetchall()
sequences = [p[0] for p in sequence_results]
for sequence in sequences:
engine.execute(f"ALTER SEQUENCE {sequence} OWNER TO {username};\n")
trans.commit()

View File

@ -13,6 +13,7 @@ SETTINGS=(
AUTH_DOMAIN
KV_MI_ID
KV_MI_CLIENT_ID
VMSS_CLIENT_ID
TENANT_ID
)

View File

@ -6,8 +6,12 @@
heading_tag="h2",
heading_classes="",
content_tag="div",
content_classes="") %}
<accordion v-cloak inline-template>
content_classes="",
default_visible=False) %}
<accordion
v-cloak
inline-template
v-bind:default-visible='{{ default_visible | string | lower }}'>
<{{wrapper_tag}} class="{{ wrapper_classes }}">
<{{heading_tag}} class="accordion__button {{ heading_classes }}">
<button

View File

@ -16,13 +16,12 @@
<th>PoP</th>
<th>CLIN Value</th>
<th>Amount Obligated</th>
<th>Amount Unspent</th>
</tr>
</thead>
<tbody>
{% for task_order in expired_task_orders %}
<tr>
<td colspan="5">
<td colspan="4">
<span class="h4 reporting-expended-funding__header">Task Order</span> <a href="{{ url_for("task_orders.view_task_order", task_order_id=task_order.id) }}">
{{ task_order.number }} {{ Icon("caret_right", classes="icon--tiny icon--blue" ) }}
</a>
@ -39,9 +38,8 @@
-
{{ clin.end_date | formattedDate(formatter="%b %d, %Y") }}
</td>
<td>{{ clin.total_amount | dollars }}</td>
<td>{{ clin.obligated_amount | dollars }}</td>
<td>{{ 0 | dollars }}</td>
<td class="table-cell--align-right">{{ clin.total_amount | dollars }}</td>
<td class="table-cell--align-right">{{ clin.obligated_amount | dollars }}</td>
<tr>
{% endfor %}
{% endfor %}

View File

@ -13,7 +13,5 @@
<hr>
{% include "portfolios/reports/obligated_funds.html" %}
{% include "portfolios/reports/expired_task_orders.html" %}
<hr>
{% include "portfolios/reports/application_and_env_spending.html" %}
</div>
{% endblock %}

View File

@ -7,61 +7,56 @@
</header>
<div class='panel'>
<div class='panel__content jedi-clin-funding'>
{% for JEDI_clin in current_obligated_funds | sort(attribute='name')%}
<div class="jedi-clin-funding__clin-wrapper">
<h3 class="h5 jedi-clin-funding__header">
{{ "JEDICLINType.{}".format(JEDI_clin.name) | translate }}
</h3>
<p class="jedi-clin-funding__subheader">Total obligated amount: {{ JEDI_clin.obligated | dollars }}</p>
<div class="jedi-clin-funding__graph">
{% if JEDI_clin.remaining < 0 %}
<span style="width:100%" class="jedi-clin-funding__graph-bar jedi-clin-funding__graph-bar--insufficient"></span>
{% else %}
{% set invoiced_width = (JEDI_clin.invoiced, JEDI_clin.obligated) | obligatedFundingGraphWidth %}
{% if invoiced_width %}
<span style="width:{{ invoiced_width }}%"
class="jedi-clin-funding__graph-bar jedi-clin-funding__graph-bar--invoiced">
</span>
{% endif %}
{% set estimated_width = (JEDI_clin.estimated, JEDI_clin.obligated) | obligatedFundingGraphWidth %}
{% if estimated_width %}
<span style="width:{{ (JEDI_clin.estimated, JEDI_clin.obligated) | obligatedFundingGraphWidth }}%"
class="jedi-clin-funding__graph-bar jedi-clin-funding__graph-bar--estimated">
</span>
{% endif %}
<span style="width:{{ (JEDI_clin.remaining, JEDI_clin.obligated) | obligatedFundingGraphWidth }}%"
class="jedi-clin-funding__graph-bar jedi-clin-funding__graph-bar--remaining">
<div class="jedi-clin-funding__clin-wrapper">
<h3 class="h5 jedi-clin-funding__header">
Total obligated amount: {{ current_obligated_funds.obligated | dollars }}
</h3>
<div class="jedi-clin-funding__graph">
{% if current_obligated_funds.remaining < 0 %}
<span style="width:100%" class="jedi-clin-funding__graph-bar jedi-clin-funding__graph-bar--insufficient"></span>
{% else %}
{% set invoiced_width = (current_obligated_funds.invoiced, current_obligated_funds.obligated) | obligatedFundingGraphWidth %}
{% if invoiced_width %}
<span style="width:{{ invoiced_width }}%"
class="jedi-clin-funding__graph-bar jedi-clin-funding__graph-bar--invoiced">
</span>
{% endif %}
{% set estimated_width = (current_obligated_funds.estimated, current_obligated_funds.obligated) | obligatedFundingGraphWidth %}
{% if estimated_width %}
<span style="width:{{ (current_obligated_funds.estimated, current_obligated_funds.obligated) | obligatedFundingGraphWidth }}%"
class="jedi-clin-funding__graph-bar jedi-clin-funding__graph-bar--estimated">
</span>
{% endif %}
<span style="width:{{ (current_obligated_funds.remaining, current_obligated_funds.obligated) | obligatedFundingGraphWidth }}%"
class="jedi-clin-funding__graph-bar jedi-clin-funding__graph-bar--remaining">
</span>
{% endif %}
</div>
<div class="jedi-clin-funding__graph-values">
<div class="jedi-clin-funding__meta">
<p class="jedi-clin-funding__meta-header">
<span class="jedi-clin-funding__meta-key jedi-clin-funding__meta-key--invoiced"></span>
Invoiced expended funds:
</p>
<p class="h3 jedi-clin-funding__meta-value">{{ current_obligated_funds.invoiced | dollars }}</p>
</div>
<div class="jedi-clin-funding__graph-values">
<div class="jedi-clin-funding__meta">
<p class="jedi-clin-funding__meta-header">
<span class="jedi-clin-funding__meta-key jedi-clin-funding__meta-key--invoiced"></span>
Invoiced expended funds:
</p>
<p class="h3 jedi-clin-funding__meta-value">{{ JEDI_clin.invoiced | dollars }}</p>
</div>
<div class="jedi-clin-funding__meta">
<p class="jedi-clin-funding__meta-header">
<span class="jedi-clin-funding__meta-key jedi-clin-funding__meta-key--estimated"></span>
Estimated expended funds:
</p>
<p class="h3 jedi-clin-funding__meta-value">{{ JEDI_clin.estimated | dollars }}</p>
</div>
<div class="jedi-clin-funding__meta">
<p class="jedi-clin-funding__meta-header">
<span class="jedi-clin-funding__meta-key jedi-clin-funding__meta-key--{{"remaining" if JEDI_clin.remaining > 0 else "insufficient"}}"></span>
Remaining funds:
</p>
<p class="h3 jedi-clin-funding__meta-value {% if JEDI_clin.remaining < 0 %}text-danger{% endif %}">{{ JEDI_clin.remaining | dollars }}</p>
</div>
<div class="jedi-clin-funding__meta">
<p class="jedi-clin-funding__meta-header">
<span class="jedi-clin-funding__meta-key jedi-clin-funding__meta-key--estimated"></span>
Estimated expended funds:
</p>
<p class="h3 jedi-clin-funding__meta-value">{{ current_obligated_funds.estimated | dollars }}</p>
</div>
<div class="jedi-clin-funding__meta">
<p class="jedi-clin-funding__meta-header">
<span class="jedi-clin-funding__meta-key jedi-clin-funding__meta-key--{{"remaining" if current_obligated_funds.remaining > 0 else "insufficient"}}"></span>
Remaining funds:
</p>
<p class="h3 jedi-clin-funding__meta-value {% if current_obligated_funds.remaining < 0 %}text-danger{% endif %}">{{ current_obligated_funds.remaining | dollars }}</p>
</div>
</div>
{% endfor %}
</div>
<div class="jedi-clin-funding__active-task-orders">
<h3 class="h4">
Active Task Orders

View File

@ -14,9 +14,15 @@
{% macro TaskOrderList(task_orders, status) %}
{% set show_task_orders = task_orders|length > 0 %}
<div class="accordion">
{% call Accordion(title=("task_orders.status_list_title"|translate({'status': status})), id=status, heading_tag="h4") %}
{% if task_orders|length > 0 %}
{% call Accordion(
title=("task_orders.status_list_title"|translate({'status': status})),
id=status,
heading_tag="h4",
default_visible=show_task_orders
) %}
{% if show_task_orders %}
{% for task_order in task_orders %}
{% set to_number %}
{% if task_order.number != None %}

View File

@ -1,2 +0,0 @@
.terraform
.vscode/

View File

@ -1,305 +0,0 @@
# ATAT Terraform
Welcome! You've found the ATAT IaC configurations.
ATAT is configured using terraform and a wrapper script called `secrets-tool`. With `terraform` we can configure infrastructure in a programmatic way and ensure consistency across environments.
## Directory Structure
**modules/** - Terraform modules. These are modules that can be re-used for multiple environments.
**providers/** - Specific environment configurations. (dev,production, etc)
# Setup
Install the following requirements.
I highly recommend [tfenv](https://github.com/tfutils/tfenv) which will help you manage versions of TF and install new ones as needed. It gives you the ability to switch back and forth between versions as necessary, especially when doing upgrades and managing multiple environments. Think of it like `pyenv`.
Python is required for the `secrets-tool`. It is used to wrap terraform and pass secrets into terraform from Azure KeyVault. This approach avoids leaving secrets on the filesystem in any way and allows for restricting access to secrets to specific operators.
Azure CLI is necessary for creating some initial resources, but is also used by the Python Azure SDK to make calls in some cases.
Requirements:
- [tfenv](https://github.com/tfutils/tfenv)
- Python 3.7
- Python pip
- Python virtualenv # FIXME: Switch to `pipenv`
- [azure cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest)
- [powershell](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell?view=powershell-6) See below
# tfenv
`tfenv` will allow you to install TF versions. For example.
```
tfenv install 0.12.18
```
_0.12.18 at time of writing_
To select a version to use
```
tfenv use 0.12.18
```
# Powershell
Some things you need to use powershell. Specifically getting client profiles for the VPN.
## Install powershell on Linux
Powershell on recent versions of Ubuntu is available through snap.
For Ubuntu 19.10
```
snap install powershell --classic
```
# Preview Features
To create all the resources we need for this environment we'll need to enable some _Preview_ features.
This registers the specific feature for _SystemAssigned_ principals
```
az feature register --namespace Microsoft.ContainerService --name MSIPreview
az feature register --namespace Microsoft.ContainerService --name NodePublicIPPreview
```
To apply the registration, run the following
```
az provider register -n Microsoft.ContainerService
```
# Running Terraform
First, you'll need to log in to Azure. With the Azure CLI installed, you can run the following.
```
az login
```
Next, you'll need to initialize the environment. This process pulls down the terraform provider module from github as well as pulls in the modules that will be used by this provider/environment setup.
```
cd providers/dev/
terraform init
```
Once initialized, you can run a plan. A `plan` compares the terraform definitions you have configured in the provider directory (Ex. `providers/dev`) with what is in the shared state file in the Azure Object Storage (which all providers are currently configured for). This then also compares it to the state of the services which are running in Azure.
If nothing has been applied, you'll see all the resources defined in terraform as all new with a `+` next to the resource name. If the resource exists, but has changed, you'll see a `~` next to the resource and the delta of the change to be applied.
If your plan looks good, you can run the apply.
```
terraform apply
```
Check the output for errors. Sometimes the syntax is valid, but some of the configuration may be wrong and only rejected by the Azure API at run time. If this is the case, fix your mistake, and re-run.
# After running TF (Manual Steps)
## VM Scale Set
After running terraform, we need to make a manual change to the VM Scale Set that is used in the kubernetes. Terraform has a bug that is not applying this as of `v1.40` of the `azurerm` provider.
In order to get the `SystemAssigned` identity to be set, it needs to be set manually in the console.
Navigate to the VM Scale Set for the k8s cluster you're managing (in the console).
![SystemAssigned Identity](images/system-assigned.png)
_Just click the `Status` to `On`_
## KeyVault Policy
There is a bug (missing feature really) in the `azurerm` terraform provider which exposes the wrong `object_id/principal_id` in the `azurerm_kubernetes_cluster` output. The `id` that it exposes is the `object_id` of the cluster itself, and _not_ the Virtual Machine Scale Set SystemAssigned identity. This needs to be updated manually after running terraform for the first time.
To update, just edit the `keyvault.tf`. Set the `principal_id` to the `object_id` of the Virtual Machine Scale set. This can be found in the Azure portal, or via cli.
```
az vmss list
```
In that list, find the scale set for the k8s cluster you're working on. You'll want the value of `principal_id`.
The error looks like the following
```
Warning FailedMount 8s (x6 over 25s) kubelet, aks-default-54410534-vmss000001 MountVolume.SetUp failed for volume "flask-secret" : mount command failed, status: Failure, reason: /etc/kubernetes/volumeplugins/azure~kv/azurekeyvault-flex
volume failed, Access denied. Caller was not found on any access policy. r nCaller: appid=e6651156-7127-432d-9617-4425177c48f1;oid=f9bcbe58-8b73-4957-aee2-133dc3e58063;numgroups=0;iss=https://sts.windows.net/b5ab0e1e-09f8-4258-afb7-fb17654bc5
b3/ r nVault: cloudzero-dev-keyvault;location=eastus2 InnerError={code:AccessDenied}
```
Final configuration will look like this.
**keyvault.tf**
```
module "keyvault" {
source = "../../modules/keyvault"
name = var.name
region = var.region
owner = var.owner
environment = var.environment
tenant_id = var.tenant_id
principal_id = "f9bcbe58-8b73-4957-aee2-133dc3e58063"
}
```
## Setting the Redis key in KeyVault
Redis auth is provided by a simple key that is randomly generated by Azure. This is a simple task for `secrets-tool`.
First, get the key from the portal. You can navigate to the redis cluster, and click on either "Show Keys", or "Access Keys"
![Redis Keys](images/redis-keys.png)
In order to set the secret, make sure you specify the keyvault that is used by the application. In dev, it's simply called "keyvault", where the operator keyvault has a different name.
```
secrets-tool secrets --keyvault https://cloudzero-dev-keyvault.vault.azure.net/ create --key REDIS-PASSWORD --value "<redis key>"
```
You'll see output similar to the following if it was successful
```
2020-01-17 14:04:42,996 - utils.keyvault.secrets - DEBUG - Set value for key: REDIS-PASSWORD
```
## Setting the Azure Storage Key
Azure storage is very similar to how Redis has a generated key. This generated key is what is used at the time of writing this doc.
Grab the key from the "Access Keys" tab on the cloud storage bucket
![Cloud Storage Keys](images/azure-storage.png)
Now create the secret in KeyVault. This secret should also be in the application specific KeyVault.
```
secrets-tool secrets --keyvault https://cloudzero-dev-keyvault.vault.azure.net/ create --key AZURE-STORAGE-KEY --value "<storage key>"
```
You'll see output similar to the following if it was successful
```
2020-01-17 14:14:59,426 - utils.keyvault.secrets - DEBUG - Set value for key: AZURE-STORAGE-KEY
```
# Shutting down an environment
To shut down and remove an environment completely, so as to not incur any costs, you would need to run a `terraform destroy`.
```
terraform destroy
```
**This will destroy all resources defined in the provider so use with caution!! This will include things like KeyVault, Postgres, and so on. You may lose data!!**
# Advanced Terraform
## Targeted Apply
Sometimes you're writing a new module and don't want to make changes to anything else. In this case you can limit what TF changes.
```
terraform plan -target=module.vpc
```
In the above example, this will only run a plan (plan/apply/destroy) on the specific module. This can be a module, or resource. You can get a list of module and resources by running `terraform show`.
# VPN Setup
[Configure OpenVPN clients for Azure VPN Gateway](https://docs.microsoft.com/en-us/azure/vpn-gateway/vpn-gateway-howto-openvpn-clients#before-you-begin)
[About P2S VPN client profiles](https://docs.microsoft.com/en-us/azure/vpn-gateway/about-vpn-profile-download)
[Configure a VPN client for P2S OpenVPN protocol connections: Azure AD authentication (Preview)](https://docs.microsoft.com/en-us/azure/vpn-gateway/openvpn-azure-ad-client)
[Create an Azure Active Directory tenant for P2S OpenVPN protocol connections](https://docs.microsoft.com/en-us/azure/vpn-gateway/openvpn-azure-ad-tenant)
The docs above should help with client configuration. The last doc (Create an Azure Active Directory..) is necessary to run the command to add the VPN app for AD.
Copied here for convenience. Just enter this in your browser.
```
# For Public Azure - Government has a different URL, see doc above
https://login.microsoftonline.com/common/oauth2/authorize?client_id=41b23e61-6c1e-4545-b367-cd054e0ed4b4&response_type=code&redirect_uri=https://portal.azure.com&nonce=1234&prompt=admin_consent
```
## Adding a client
TODO
## Downloading a client profile
TODO
# Quick Steps
Copy paste (mostly)
*Register Preview features*
See [Registering Features](#Preview_Features)
*Edit provider.tf and turn off remote bucket temporarily (comment out backend {} section)*
```
provider "azurerm" {
version = "=1.40.0"
}
provider "azuread" {
# Whilst version is optional, we /strongly recommend/ using it to pin the version of the Provider being used
version = "=0.7.0"
}
terraform {
#backend "azurerm" {
#resource_group_name = "cloudzero-dev-tfstate"
#storage_account_name = "cloudzerodevtfstate"
#container_name = "tfstate"
#key = "dev.terraform.tfstate"
#}
}
```
`terraform init`
`terraform plan -target=module.tf_state`
Ensure the state bucket is created.
*create the container in the portal (or cli).*
This simply involves going to the bucket in the azure portal and creating the container.
Now is the tricky part. For this, we will be switching from local state (files) to remote state (stored in the azure bucket)
Uncomment the `backend {}` section in the `provider.tf` file. Once uncommented, we will re-run the init. This will attempt to copy the local state to the remote bucket.
`terraform init`
*Say `yes` to the question*
Now we need to update `variables.tf` with the principals for the users in the `admin_users` variable map. If these are not defined yet, just leave it as an empty set.
Next, we'll create the operator keyvault.
`terraform plan -target=module.operator_keyvault`
Next, we'll pre-populate some secrets using the secrets-tool. Follow the install/setup section in the README.md first. Then populate the secrets with a definition file as described in the following link.
https://github.com/dod-ccpo/atst/tree/staging/terraform/secrets-tool#populating-secrets-from-secrets-definition-file
*Create service principal for AKS*
```
az ad sp create-for-rbac
```
Take note of the output, you'll need it in the next step to store the secret and `client_id` in keyvault.
This also involves using secrets-tool. Substitute your keyvault url.
```
secrets-tool secrets --keyvault https://ops-jedidev-keyvault.vault.azure.net/ create --key k8s-client-id --value [value]
secrets-tool secrets --keyvault https://ops-jedidev-keyvault.vault.azure.net/ create --key k8s-client-secret --value [value]
```
*Next we'll apply the rest of the TF configuration*
`terraform plan` # Make sure this looks correct
`terraform apply`
*[Configure AD for MFA](https://docs.microsoft.com/en-us/azure/vpn-gateway/openvpn-azure-ad-mfa)*
*Then we need an instance of the container*
Change directories to the repo root. Ensure that you've checked out the staging or master branch:
`docker build . --build-arg CSP=azure -f ./Dockerfile -t atat:latest`
*Create secrets for ATAT database user*
Change directories back to terraform/secrets-tool. There is a sample file there. Make sure you know the URL for the application Key Vault (distinct from the operator Key Vault). Run:
`secrets-tool secrets --keyvault [application key vault URL] load -f ./postgres-user.yaml`
*Create the database, database user, schema, and initial data set*
This is discussed in more detail [here](https://github.com/dod-ccpo/atst/tree/staging/terraform/secrets-tool#setting-up-the-initial-atat-database). Be sure to read the requirements section.
```
secrets-tool database --keyvault [operator key vault URL] provision --app-keyvault [application key vault URL] --dbname jedidev-atat --dbhost [database host name] --ccpo-users /full/path/to/users.yml
```

Binary file not shown.

Before

Width:  |  Height:  |  Size: 325 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 249 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 229 KiB

View File

@ -1,40 +0,0 @@
# Storage bucket module: resource group + storage account + network ACLs + blob container.
resource "azurerm_resource_group" "bucket" {
name = "${var.name}-${var.environment}-${var.service_name}"
location = var.region
}
resource "azurerm_storage_account" "bucket" {
name = var.service_name
resource_group_name = azurerm_resource_group.bucket.name
location = azurerm_resource_group.bucket.location
account_tier = "Standard"
account_replication_type = "LRS"
}
# Network rules default to var.policy (Allow/Deny); whitelisted CIDRs and subnets are exempt.
resource "azurerm_storage_account_network_rules" "acls" {
resource_group_name = azurerm_resource_group.bucket.name
storage_account_name = azurerm_storage_account.bucket.name
default_action = var.policy
# Azure Storage CIDR ACLs do not accept /32 CIDR ranges.
ip_rules = [
for cidr in values(var.whitelist) : cidr
]
virtual_network_subnet_ids = var.subnet_ids
bypass = ["AzureServices"]
}
resource "azurerm_storage_container" "bucket" {
name = "content"
storage_account_name = azurerm_storage_account.bucket.name
container_access_type = var.container_access_type
}
# Added until requisite TF bugs are fixed. Typically this would be configured in the
# storage_account resource
# NOTE(review): shells out to the az CLI, so the operator must be logged in; runs only on create.
resource "null_resource" "retention" {
provisioner "local-exec" {
command = "az storage logging update --account-name ${azurerm_storage_account.bucket.name} --log rwd --services bqt --retention 90"
}
}

View File

@ -1,48 +0,0 @@
# Input variables for the bucket module.
variable "region" {
type = string
description = "Region this module and resources will be created in"
}
variable "name" {
type = string
description = "Unique name for the services in this module"
}
variable "environment" {
type = string
description = "Environment these resources reside (prod, dev, staging, etc)"
}
variable "owner" {
type = string
description = "Owner of the environment and resources created in this module"
}
variable "container_access_type" {
default = "private"
description = "Access type for the container (Default: private)"
type = string
}
variable "service_name" {
description = "Name of the service using this bucket"
type = string
}
variable "subnet_ids" {
description = "List of subnet_ids that will have access to this service"
type = list
}
variable "policy" {
description = "The default policy for the network access rules (Allow/Deny)"
default = "Deny"
type = string
}
variable "whitelist" {
type = map
description = "A map of whitelisted IPs and CIDR ranges. For single IPs, Azure expects just the IP, NOT a /32."
default = {}
}

View File

@ -1,43 +0,0 @@
# CDN module: profile + endpoint fronting var.origin_host_name, with diagnostics to Log Analytics.
# random_id gives the endpoint a globally-unique name suffix.
resource "random_id" "server" {
keepers = {
azi_id = 1
}
byte_length = 8
}
resource "azurerm_resource_group" "cdn" {
name = "${var.name}-${var.environment}-cdn"
location = var.region
}
resource "azurerm_cdn_profile" "cdn" {
name = "${var.name}-${var.environment}-profile"
location = azurerm_resource_group.cdn.location
resource_group_name = azurerm_resource_group.cdn.name
sku = var.sku
}
resource "azurerm_cdn_endpoint" "cdn" {
name = "${var.name}-${var.environment}-${random_id.server.hex}"
profile_name = azurerm_cdn_profile.cdn.name
location = azurerm_resource_group.cdn.location
resource_group_name = azurerm_resource_group.cdn.name
origin {
name = "${var.name}-${var.environment}-origin"
host_name = var.origin_host_name
}
}
# NOTE(review): resource is named "acr_diagnostic" but targets the CDN endpoint — likely a copy/paste name.
resource "azurerm_monitor_diagnostic_setting" "acr_diagnostic" {
name = "${var.name}-${var.environment}-acr-diag"
target_resource_id = azurerm_cdn_endpoint.cdn.id
log_analytics_workspace_id = var.workspace_id
log {
category = "CoreAnalytics"
retention_policy {
enabled = true
}
}
}

View File

@ -1,35 +0,0 @@
# Input variables for the cdn module.
variable "region" {
type = string
description = "Region this module and resources will be created in"
}
variable "name" {
type = string
description = "Unique name for the services in this module"
}
variable "environment" {
type = string
description = "Environment these resources reside (prod, dev, staging, etc)"
}
variable "owner" {
type = string
description = "Owner of the environment and resources created in this module"
}
variable "sku" {
type = string
description = "SKU of which CDN to use"
default = "Standard_Verizon"
}
variable "origin_host_name" {
type = string
description = "Subdomain to use for the origin in requests to the CDN"
}
variable "workspace_id" {
description = "Log Analytics Workspace ID for sending logs generated by this resource"
type = string
}

View File

@ -1,67 +0,0 @@
# Container registry (ACR) module: registry with IP/subnet network rules and diagnostics.
locals {
whitelist = values(var.whitelist)
}
resource "azurerm_resource_group" "acr" {
name = "${var.name}-${var.environment}-acr"
location = var.region
}
resource "azurerm_container_registry" "acr" {
name = "${var.name}${var.environment}registry" # Alpha Numeric Only
resource_group_name = azurerm_resource_group.acr.name
location = azurerm_resource_group.acr.location
sku = var.sku
admin_enabled = var.admin_enabled
#georeplication_locations = [azurerm_resource_group.acr.location, var.backup_region]
network_rule_set {
default_action = var.policy
ip_rule = [
for cidr in values(var.whitelist) : {
action = "Allow"
ip_range = cidr
}
]
# Dynamic rule should work, but doesn't - See https://github.com/hashicorp/terraform/issues/22340#issuecomment-518779733
#dynamic "ip_rule" {
# for_each = values(var.whitelist)
# content {
# action = "Allow"
# ip_range = ip_rule.value
# }
#}
virtual_network = [
for subnet in var.subnet_ids : {
action = "Allow"
subnet_id = subnet
}
]
}
}
# Ship registry audit/login events and metrics to the shared Log Analytics workspace.
resource "azurerm_monitor_diagnostic_setting" "acr_diagnostic" {
name = "${var.name}-${var.environment}-acr-diag"
target_resource_id = azurerm_container_registry.acr.id
log_analytics_workspace_id = var.workspace_id
log {
category = "ContainerRegistryRepositoryEvents"
retention_policy {
enabled = true
}
}
log {
category = "ContainerRegistryLoginEvents"
retention_policy {
enabled = true
}
}
metric {
category = "AllMetrics"
retention_policy {
enabled = true
}
}
}

View File

@ -1,59 +0,0 @@
# Input variables for the container_registry module.
variable "region" {
type = string
description = "Region this module and resources will be created in"
}
variable "name" {
type = string
description = "Unique name for the services in this module"
}
variable "environment" {
type = string
description = "Environment these resources reside (prod, dev, staging, etc)"
}
variable "owner" {
type = string
description = "Owner of the environment and resources created in this module"
}
variable "backup_region" {
type = string
description = "Backup region for georeplicating the container registry"
}
variable "sku" {
type = string
description = "SKU to use for the container registry service"
default = "Premium"
}
variable "admin_enabled" {
type = string
description = "Admin enabled? (true/false default: false)"
default = false
}
variable "subnet_ids" {
description = "List of subnet_ids that will have access to this service"
type = list
}
variable "policy" {
description = "The default policy for the network access rules (Allow/Deny)"
default = "Deny"
type = string
}
variable "whitelist" {
type = map
description = "A map of whitelisted IPs and CIDR ranges. For single IPs, Azure expects just the IP, NOT a /32."
default = {}
}
variable "workspace_id" {
description = "The Log Analytics Workspace ID"
type = string
}

View File

@ -1,89 +0,0 @@
# AKS module: cluster with a single default node pool, control-plane diagnostics,
# and a Network Contributor role grant so AKS can provision load balancers in the VNET.
resource "azurerm_resource_group" "k8s" {
name = "${var.name}-${var.environment}-vpc"
location = var.region
}
resource "azurerm_kubernetes_cluster" "k8s" {
name = "${var.name}-${var.environment}-k8s"
location = azurerm_resource_group.k8s.location
resource_group_name = azurerm_resource_group.k8s.name
dns_prefix = var.k8s_dns_prefix
service_principal {
client_id = var.client_id
client_secret = var.client_secret
}
default_node_pool {
name = "default"
vm_size = "Standard_D1_v2"
os_disk_size_gb = 30
vnet_subnet_id = var.vnet_subnet_id
enable_node_public_ip = true # Nodes need a public IP for external resources. FIXME: Switch to NAT Gateway if its available in our subscription
enable_auto_scaling = var.enable_auto_scaling
max_count = var.max_count # FIXME: if auto_scaling disabled, set to 0
min_count = var.min_count # FIXME: if auto_scaling disabled, set to 0
}
identity {
type = "SystemAssigned"
}
# Autoscaler changes node_count outside Terraform; ignore it to avoid perpetual diffs.
lifecycle {
ignore_changes = [
default_node_pool.0.node_count
]
}
tags = {
environment = var.environment
owner = var.owner
}
}
# Ship control-plane logs (apiserver, controller-manager, scheduler, audit, autoscaler) and metrics.
resource "azurerm_monitor_diagnostic_setting" "k8s_diagnostic-1" {
name = "${var.name}-${var.environment}-k8s-diag"
target_resource_id = azurerm_kubernetes_cluster.k8s.id
log_analytics_workspace_id = var.workspace_id
log {
category = "kube-apiserver"
retention_policy {
enabled = true
}
}
log {
category = "kube-controller-manager"
retention_policy {
enabled = true
}
}
log {
category = "kube-scheduler"
retention_policy {
enabled = true
}
}
log {
category = "kube-audit"
retention_policy {
enabled = true
}
}
log {
category = "cluster-autoscaler"
retention_policy {
enabled = true
}
}
metric {
category = "AllMetrics"
retention_policy {
enabled = true
}
}
}
resource "azurerm_role_assignment" "k8s_network_contrib" {
scope = var.vnet_id
role_definition_name = "Network Contributor"
principal_id = azurerm_kubernetes_cluster.k8s.identity[0].principal_id
}

View File

@ -1,3 +0,0 @@
# Principal ID of the cluster's system-assigned identity, for downstream role assignments.
output "principal_id" {
value = azurerm_kubernetes_cluster.k8s.identity[0].principal_id
}

View File

@ -1,74 +0,0 @@
# Input variables for the k8s (AKS) module.
variable "region" {
type = string
description = "Region this module and resources will be created in"
}
variable "name" {
type = string
description = "Unique name for the services in this module"
}
variable "environment" {
type = string
description = "Environment these resources reside (prod, dev, staging, etc)"
}
variable "owner" {
type = string
description = "Owner of the environment and resources created in this module"
}
variable "k8s_dns_prefix" {
type = string
description = "A DNS prefix"
}
variable "k8s_node_size" {
type = string
description = "The size of the instance to use in the node pools for k8s"
default = "Standard_A1_v2"
}
variable "vnet_subnet_id" {
description = "Subnet to use for the default k8s pool"
type = string
}
variable "enable_auto_scaling" {
default = false
type = string
description = "Enable or disable autoscaling (Default: false)"
}
variable "max_count" {
default = 1
type = string
description = "Maximum number of nodes to use in autoscaling. This requires `enable_auto_scaling` to be set to true"
}
variable "min_count" {
default = 1
type = string
description = "Minimum number of nodes to use in autoscaling. This requires `enable_auto_scaling` to be set to true"
}
variable "client_id" {
type = string
description = "The client ID for the Service Principal associated with the AKS cluster."
}
variable "client_secret" {
type = string
description = "The client secret for the Service Principal associated with the AKS cluster."
}
variable "workspace_id" {
description = "Log Analytics workspace for this resource to log to"
type = string
}
variable "vnet_id" {
description = "The ID of the VNET that the AKS cluster app registration needs to provision load balancers in"
type = string
}

View File

@ -1,101 +0,0 @@
# Key Vault module: vault with network ACLs, a read-only policy for the k8s identity,
# per-admin management policies, and audit-event diagnostics.
data "azurerm_client_config" "current" {}
resource "azurerm_resource_group" "keyvault" {
name = "${var.name}-${var.environment}-keyvault"
location = var.region
}
resource "azurerm_key_vault" "keyvault" {
name = "${var.name}-${var.environment}-keyvault"
location = azurerm_resource_group.keyvault.location
resource_group_name = azurerm_resource_group.keyvault.name
tenant_id = data.azurerm_client_config.current.tenant_id
sku_name = "premium"
network_acls {
default_action = var.policy
bypass = "AzureServices"
virtual_network_subnet_ids = var.subnet_ids
ip_rules = values(var.whitelist)
}
tags = {
environment = var.environment
owner = var.owner
}
}
# Grant the k8s principal get-only access; count=0 skips this when no principal is supplied.
resource "azurerm_key_vault_access_policy" "keyvault_k8s_policy" {
count = length(var.principal_id) > 0 ? 1 : 0
key_vault_id = azurerm_key_vault.keyvault.id
tenant_id = data.azurerm_client_config.current.tenant_id
object_id = var.principal_id
key_permissions = [
"get",
]
secret_permissions = [
"get",
]
}
# Admin Access
resource "azurerm_key_vault_access_policy" "keyvault_admin_policy" {
for_each = var.admin_principals
key_vault_id = azurerm_key_vault.keyvault.id
tenant_id = data.azurerm_client_config.current.tenant_id
object_id = each.value
key_permissions = [
"get",
"list",
"create",
"update",
"delete",
]
secret_permissions = [
"get",
"list",
"set",
]
# backup create delete deleteissuers get getissuers import list listissuers managecontacts manageissuers purge recover restore setissuers update
certificate_permissions = [
"get",
"list",
"create",
"import",
"listissuers",
"manageissuers",
"deleteissuers",
"backup",
"update",
]
}
resource "azurerm_monitor_diagnostic_setting" "keyvault_diagnostic" {
name = "${var.name}-${var.environment}-keyvault-diag"
target_resource_id = azurerm_key_vault.keyvault.id
log_analytics_workspace_id = var.workspace_id
log {
category = "AuditEvent"
enabled = true
retention_policy {
enabled = true
}
}
metric {
category = "AllMetrics"
retention_policy {
enabled = true
}
}
}

View File

@ -1,7 +0,0 @@
# Vault resource ID and its base URI, consumed by app config and the secrets tooling.
output "id" {
value = azurerm_key_vault.keyvault.id
}
output "url" {
value = azurerm_key_vault.keyvault.vault_uri
}

View File

@ -1,57 +0,0 @@
# Input variables for the keyvault module.
variable "region" {
type = string
description = "Region this module and resources will be created in"
}
variable "name" {
type = string
description = "Unique name for the services in this module"
}
variable "environment" {
type = string
description = "Environment these resources reside (prod, dev, staging, etc)"
}
variable "owner" {
type = string
description = "Owner of this environment"
}
variable "tenant_id" {
type = string
description = "The Tenant ID"
}
variable "principal_id" {
type = string
description = "The service principal_id of the k8s cluster"
}
variable "admin_principals" {
type = map
description = "A list of user principals who need access to manage the keyvault"
}
variable "subnet_ids" {
description = "List of subnet_ids that will have access to this service"
type = list
}
variable "policy" {
description = "The default policy for the network access rules (Allow/Deny)"
default = "Deny"
type = string
}
variable "whitelist" {
type = map
description = "A map of whitelisted IPs and CIDR ranges. For single IPs, Azure expects just the IP, NOT a /32."
default = {}
}
variable "workspace_id" {
description = "Log Analytics Workspace ID for sending logs generated by this resource"
type = string
}

View File

@ -1,27 +0,0 @@
# Load balancer module: static public IP fronting an Azure LB.
resource "azurerm_resource_group" "lb" {
name = "${var.name}-${var.environment}-lb"
location = var.region
}
resource "azurerm_public_ip" "lb" {
name = "${var.name}-${var.environment}-ip"
location = var.region
resource_group_name = azurerm_resource_group.lb.name
allocation_method = "Static"
}
resource "azurerm_lb" "lb" {
name = "${var.name}-${var.environment}-lb"
location = var.region
resource_group_name = azurerm_resource_group.lb.name
frontend_ip_configuration {
name = "${var.name}-${var.environment}-ip"
public_ip_address_id = azurerm_public_ip.lb.id
}
tags = {
owner = var.owner
environment = var.environment
}
}

View File

@ -1,19 +0,0 @@
# Input variables for the lb module.
variable "region" {
type = string
description = "Region this module and resources will be created in"
}
variable "name" {
type = string
description = "Unique name for the services in this module"
}
variable "environment" {
type = string
description = "Environment these resources reside (prod, dev, staging, etc)"
}
variable "owner" {
type = string
description = "Owner of the environment and resources created in this module"
}

View File

@ -1,15 +0,0 @@
# Log Analytics workspace module: the central sink for all diagnostic settings in this stack.
resource "azurerm_resource_group" "log_workspace" {
name = "${var.name}-${var.environment}-log-workspace"
location = var.region
}
resource "azurerm_log_analytics_workspace" "log_workspace" {
name = "${var.name}-${var.environment}-log-workspace"
location = azurerm_resource_group.log_workspace.location
resource_group_name = azurerm_resource_group.log_workspace.name
sku = "Premium" # NOTE(review): legacy sku name; newer workspaces use "PerGB2018" — confirm against provider version
tags = {
environment = var.environment
owner = var.owner
}
}

View File

@ -1,3 +0,0 @@
# Workspace resource ID, passed to every module's workspace_id input.
output "workspace_id" {
value = azurerm_log_analytics_workspace.log_workspace.id
}

View File

@ -1,19 +0,0 @@
# Input variables for the log_workspace module.
variable "region" {
type = string
description = "Region this module and resources will be created in"
}
variable "name" {
type = string
description = "Unique name for the services in this module"
}
variable "environment" {
type = string
description = "Environment these resources reside (prod, dev, staging, etc)"
}
variable "owner" {
type = string
description = "Owner of the environment and resources created in this module"
}

View File

@ -1,20 +0,0 @@
# Managed identity module: user-assigned identity plus one subscription-scoped
# role assignment per entry in var.roles.
resource "azurerm_resource_group" "identity" {
name = "${var.name}-${var.environment}-${var.identity}"
location = var.region
}
resource "azurerm_user_assigned_identity" "identity" {
resource_group_name = azurerm_resource_group.identity.name
location = azurerm_resource_group.identity.location
name = "${var.name}-${var.environment}-${var.identity}"
}
data "azurerm_subscription" "primary" {}
resource "azurerm_role_assignment" "roles" {
count = length(var.roles)
scope = data.azurerm_subscription.primary.id
role_definition_name = var.roles[count.index]
principal_id = azurerm_user_assigned_identity.identity.principal_id
}

View File

@ -1,11 +0,0 @@
# Identity resource ID, principal ID (for role grants), and client ID (for SDK auth).
output "id" {
value = azurerm_user_assigned_identity.identity.id
}
output "principal_id" {
value = azurerm_user_assigned_identity.identity.principal_id
}
output "client_id" {
value = azurerm_user_assigned_identity.identity.client_id
}

View File

@ -1,29 +0,0 @@
# Input variables for the managed_identity module.
variable "region" {
type = string
description = "Region this module and resources will be created in"
}
variable "name" {
type = string
description = "Unique name for the services in this module"
}
variable "environment" {
type = string
description = "Environment these resources reside (prod, dev, staging, etc)"
}
variable "owner" {
type = string
description = "Owner of the environment and resources created in this module"
}
variable "identity" {
type = string
description = "Name of the managed identity to create"
}
variable "roles" {
type = list
description = "List of roles by name"
}

View File

@ -1,67 +0,0 @@
# PostgreSQL module: single server + ATAT database, reachable only from the given subnet,
# with logs/metrics shipped to Log Analytics.
resource "azurerm_resource_group" "sql" {
name = "${var.name}-${var.environment}-postgres"
location = var.region
}
resource "azurerm_postgresql_server" "sql" {
name = "${var.name}-${var.environment}-sql"
location = azurerm_resource_group.sql.location
resource_group_name = azurerm_resource_group.sql.name
sku {
name = var.sku_name
capacity = var.sku_capacity
tier = var.sku_tier
family = var.sku_family
}
storage_profile {
storage_mb = var.storage_mb
backup_retention_days = var.storage_backup_retention_days
geo_redundant_backup = var.storage_geo_redundant_backup
auto_grow = var.storage_auto_grow
}
administrator_login = var.administrator_login
administrator_login_password = var.administrator_login_password
version = var.postgres_version
ssl_enforcement = var.ssl_enforcement
}
# Restrict server access to the given VNET subnet (endpoint may not exist yet at apply time).
resource "azurerm_postgresql_virtual_network_rule" "sql" {
name = "${var.name}-${var.environment}-rule"
resource_group_name = azurerm_resource_group.sql.name
server_name = azurerm_postgresql_server.sql.name
subnet_id = var.subnet_id
ignore_missing_vnet_service_endpoint = true
}
resource "azurerm_postgresql_database" "db" {
name = "${var.name}-${var.environment}-atat"
resource_group_name = azurerm_resource_group.sql.name
server_name = azurerm_postgresql_server.sql.name
charset = "UTF8"
collation = "en-US" # NOTE(review): Postgres collations are usually spelled like "en_US.utf8" — confirm Azure accepts "en-US"
}
resource "azurerm_monitor_diagnostic_setting" "postgresql_diagnostic" {
name = "${var.name}-${var.environment}-postgresql-diag"
target_resource_id = azurerm_postgresql_server.sql.id
log_analytics_workspace_id = var.workspace_id
log {
category = "PostgreSQLLogs"
enabled = true
retention_policy {
enabled = true
}
}
metric {
category = "AllMetrics"
retention_policy {
enabled = true
}
}
}

View File

@ -1,100 +0,0 @@
# Input variables for the postgres module.
variable "region" {
type = string
description = "Region this module and resources will be created in"
}
variable "name" {
type = string
description = "Unique name for the services in this module"
}
variable "environment" {
type = string
description = "Environment these resources reside (prod, dev, staging, etc)"
}
variable "owner" {
type = string
description = "Owner of the environment and resources created in this module"
}
variable "subnet_id" {
type = string
description = "Subnet the SQL server should run"
}
variable "sku_name" {
type = string
description = "SKU name"
default = "GP_Gen5_2"
}
variable "sku_capacity" {
type = string
description = "SKU Capacity"
default = "2"
}
variable "sku_tier" {
type = string
description = "SKU Tier"
default = "GeneralPurpose"
}
variable "sku_family" {
type = string
description = "SKU Family"
default = "Gen5"
}
variable "storage_mb" {
type = string
description = "Size in MB of the storage used for the sql server"
default = "5120"
}
variable "storage_backup_retention_days" {
type = string
description = "Storage backup retention (days)"
default = "7"
}
variable "storage_geo_redundant_backup" {
type = string
description = "Geographic redundant backup (Enabled/Disabled)"
default = "Disabled"
}
variable "storage_auto_grow" {
type = string
description = "Auto Grow? (Enabled/Disabled)"
default = "Enabled"
}
variable "administrator_login" {
type = string
description = "Administrator login"
}
variable "administrator_login_password" {
type = string
description = "Administrator password"
}
variable "postgres_version" {
type = string
description = "Postgres version to use"
default = "10"
}
variable "ssl_enforcement" {
type = string
description = "Enforce SSL (Enabled/Disable)"
default = "Enabled"
}
variable "workspace_id" {
description = "Log Analytics workspace for this resource to log to"
type = string
}

View File

@ -1,38 +0,0 @@
# Redis module: cache instance pinned to a subnet, with metrics to Log Analytics.
resource "azurerm_resource_group" "redis" {
name = "${var.name}-${var.environment}-redis"
location = var.region
}
# NOTE: the Name used for Redis needs to be globally unique
resource "azurerm_redis_cache" "redis" {
name = "${var.name}-${var.environment}-redis"
location = azurerm_resource_group.redis.location
resource_group_name = azurerm_resource_group.redis.name
capacity = var.capacity
family = var.family
sku_name = var.sku_name
enable_non_ssl_port = var.enable_non_ssl_port
minimum_tls_version = var.minimum_tls_version
subnet_id = var.subnet_id
redis_configuration {
enable_authentication = var.enable_authentication
}
tags = {
environment = var.environment
owner = var.owner
}
}
resource "azurerm_monitor_diagnostic_setting" "redis_diagnostic" {
name = "${var.name}-${var.environment}-redis-diag"
target_resource_id = azurerm_redis_cache.redis.id
log_analytics_workspace_id = var.workspace_id
metric {
category = "AllMetrics"
retention_policy {
enabled = true
}
}
}

View File

@ -1,65 +0,0 @@
# Input variables for the redis module.
variable "region" {
type = string
description = "Region this module and resources will be created in"
}
variable "name" {
type = string
description = "Unique name for the services in this module"
}
variable "environment" {
type = string
description = "Environment these resources reside (prod, dev, staging, etc)"
}
variable "owner" {
type = string
description = "Owner of the environment and resources created in this module"
}
variable "capacity" {
type = string
default = 2
description = "The capacity of the redis cache"
}
variable "family" {
type = string
default = "C"
description = "The subscription family for redis"
}
variable "sku_name" {
type = string
default = "Standard"
description = "The sku to use"
}
variable "enable_non_ssl_port" {
type = bool
default = false
description = "Enable non TLS port (default: false)"
}
variable "minimum_tls_version" {
type = string
default = "1.2"
description = "Minimum TLS version to use"
}
variable "enable_authentication" {
type = bool
default = true
description = "Enable or disable authentication (default: true)"
}
variable "subnet_id" {
type = string
description = "Subnet ID that the service_endpoint should reside"
}
variable "workspace_id" {
description = "Log Analytics workspace for this resource to log to"
type = string
}

View File

@ -1,74 +0,0 @@
# VPC module: virtual network, per-key subnets, route tables, and a default route per table.
resource "azurerm_resource_group" "vpc" {
name = "${var.name}-${var.environment}-vpc"
location = var.region
tags = {
environment = var.environment
owner = var.owner
}
}
# DDoS plan is optional; var.ddos_enabled acts as a 0/1 count toggle.
resource "azurerm_network_ddos_protection_plan" "vpc" {
count = var.ddos_enabled
name = "${var.name}-${var.environment}-ddos"
location = azurerm_resource_group.vpc.location
resource_group_name = azurerm_resource_group.vpc.name
}
resource "azurerm_virtual_network" "vpc" {
name = "${var.name}-${var.environment}-network"
location = azurerm_resource_group.vpc.location
resource_group_name = azurerm_resource_group.vpc.name
address_space = ["${var.virtual_network}"]
dns_servers = var.dns_servers
tags = {
environment = var.environment
owner = var.owner
}
}
# One subnet per key in var.networks; each value is "cidr,..." and element 0 is the prefix.
resource "azurerm_subnet" "subnet" {
for_each = var.networks
name = "${var.name}-${var.environment}-${each.key}"
resource_group_name = azurerm_resource_group.vpc.name
virtual_network_name = azurerm_virtual_network.vpc.name
address_prefix = element(split(",", each.value), 0)
# See https://github.com/terraform-providers/terraform-provider-azurerm/issues/3471
lifecycle {
ignore_changes = [route_table_id]
}
service_endpoints = split(",", var.service_endpoints[each.key])
#delegation {
# name = "acctestdelegation"
#
# service_delegation {
# name = "Microsoft.ContainerInstance/containerGroups"
# actions = ["Microsoft.Network/virtualNetworks/subnets/action"]
# }
#}
}
resource "azurerm_route_table" "route_table" {
for_each = var.route_tables
name = "${var.name}-${var.environment}-${each.key}"
location = azurerm_resource_group.vpc.location
resource_group_name = azurerm_resource_group.vpc.name
}
# NOTE(review): keys of var.networks and var.route_tables must match for these lookups to resolve.
resource "azurerm_subnet_route_table_association" "route_table" {
for_each = var.networks
subnet_id = azurerm_subnet.subnet[each.key].id
route_table_id = azurerm_route_table.route_table[each.key].id
}
# Default route (0.0.0.0/0) per route table; each.value supplies the next hop type.
resource "azurerm_route" "route" {
for_each = var.route_tables
name = "${var.name}-${var.environment}-default"
resource_group_name = azurerm_resource_group.vpc.name
route_table_name = azurerm_route_table.route_table[each.key].name
address_prefix = "0.0.0.0/0"
next_hop_type = each.value
}

View File

@ -1,13 +0,0 @@
# Legacy single-subnet output plus the full keyed map and the VNET id.
output "subnets" {
value = azurerm_subnet.subnet["private"].id #FIXED: this is now legacy, use subnet_list
}
output "subnet_list" {
value = {
for k, id in azurerm_subnet.subnet : k => id
}
}
output "id" {
value = azurerm_virtual_network.vpc.id
}

View File

@ -1,48 +0,0 @@
# Input variables for the vpc module.
variable "environment" {
description = "Environment (Prod,Dev,etc)"
}
variable "region" {
description = "Region (useast2, etc)"
}
variable "name" {
description = "Name or prefix to use for all resources created by this module"
}
variable "owner" {
description = "Owner of these resources"
}
variable "ddos_enabled" {
description = "Enable or disable DDoS Protection (1,0)"
default = "0"
}
variable "virtual_network" {
description = "The supernet used for this VPC a.k.a Virtual Network"
type = string
}
variable "networks" {
description = "A map of lists describing the network topology"
type = map
}
variable "dns_servers" {
description = "DNS Server IPs for internal and public DNS lookups (must be on a defined subnet)"
type = list
}
variable "route_tables" {
type = map
description = "A map with the route tables to create"
}
variable "service_endpoints" {
type = map
description = "A map of the service endpoints and its mapping to subnets"
}

View File

@ -1,29 +0,0 @@
# Environment wiring: two instances of the bucket module with different exposure.
# Task order bucket is required to be accessible publicly by the users.
# which is why the policy here is "Allow"
module "task_order_bucket" {
source = "../../modules/bucket"
service_name = "jeditasksatat"
owner = var.owner
name = var.name
environment = var.environment
region = var.region
policy = "Allow"
subnet_ids = [module.vpc.subnets]
whitelist = var.storage_admin_whitelist
}
# TF State should be restricted to admins only, but IP protected
# This has to be public due to a chicken/egg issue of VPN not
# existing until TF is run. If this bucket is private, you would
# not be able to access it when running TF without being on a VPN.
module "tf_state" {
source = "../../modules/bucket"
service_name = "jedidevtfstate"
owner = var.owner
name = var.name
environment = var.environment
region = var.region
policy = "Deny"
subnet_ids = []
whitelist = var.storage_admin_whitelist
}

View File

@ -1,9 +0,0 @@
# Environment wiring: CDN fronting the staging site, logging to the shared workspace.
module "cdn" {
source = "../../modules/cdn"
origin_host_name = "staging.atat.code.mil"
owner = var.owner
environment = var.environment
name = var.name
region = var.region
workspace_id = module.logs.workspace_id
}

View File

@ -1,12 +0,0 @@
# Environment wiring: private ACR reachable from the private subnet and admin IPs only.
module "container_registry" {
source = "../../modules/container_registry"
name = var.name
region = var.region
environment = var.environment
owner = var.owner
backup_region = var.backup_region
policy = "Deny"
subnet_ids = [module.vpc.subnet_list["private"].id]
whitelist = var.admin_user_whitelist
workspace_id = module.logs.workspace_id
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 85 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 71 KiB

View File

@ -1,50 +0,0 @@
' USEAST development network: Azure CDN serving static assets out of Azure
' Storage, a public/private split VNet (10.1.x), and a Postgres replica kept
' in the USWEST backup region.
@startuml USEAST Development Network
title USEAST Development Network

cloud Internet

' Azure-managed CDN + storage pair, configured for geographic failover.
cloud Azure {
  [Azure Storage] as storage
  [Azure CDN] as cdn
  cdn --> storage : "HTTPS/443"
  note as cdn_note
    CDN and Azure storage are
    managed by Azure and configured
    for geographic failover
  end note
}

frame "USEAST Virtual Network" as vnet {
  ' Public subnet holds only the load balancer.
  frame "Public Route Table" as public_rt{
    frame "Public Subnet" as public_subnet {
      [ALB]
      [Internet] --> ALB
      note as public_useast
        10.1.1.0/24
      end note
    }
  }
  ' Private subnet holds the cluster and its data stores; all traffic is TLS.
  frame "Private Route Table" as private_rt{
    frame "Private Subnet" as private_subnet {
      [AKS]
      [Redis]
      [Postgres]
      [AzurePrivateStorage]
      AKS --> Redis : "TLS:6379"
      AKS --> Postgres : "TLS:5432"
      AKS --> AzurePrivateStorage : "HTTPS/443"
      [ALB] --> AKS : "HTTPS:443"
      note as private_useast
        10.1.2.0/24
      end note
    }
  }
}

' Cross-region Postgres replication over private peering.
frame "US West Backup Region" as backupregion {
  component "Backup Postgres" as pgbackup
  [Postgres] --> pgbackup : "Private Peering / TLS:5432"
}

note right of [ALB] : Azure Load Balancer restricted to AKS only
@enduml

View File

@ -1,40 +0,0 @@
' USWEST development network: the mirror of the USEAST diagram with 10.2.x
' address space; its Postgres replicates back to the USEAST primary region.
@startuml USWEST Development Network
title USWEST Development Network

cloud Internet

' Fixed label: this VNet is USWEST (matches the title and the 10.2.x
' subnets); the original diagram carried a copy-pasted "USEAST Virtual
' Network" label from the sibling diagram.
frame "USWEST Virtual Network" as vnet {
  ' Public subnet holds only the load balancer.
  frame "Public Route Table" as public_rt{
    frame "Public Subnet" as public_subnet {
      [ALB]
      [Internet] --> ALB
      note as public_useast
        10.2.1.0/24
      end note
    }
  }
  ' Private subnet holds the cluster and its data stores; all traffic is TLS.
  frame "Private Route Table" as private_rt{
    frame "Private Subnet" as private_subnet {
      [AKS]
      [Redis]
      [Postgres]
      [AzurePrivateStorage]
      AKS --> Redis : "TLS:6379"
      AKS --> Postgres : "TLS:5432"
      AKS --> AzurePrivateStorage : "HTTPS/443"
      [ALB] --> AKS : "HTTPS:443"
      note as private_useast
        10.2.2.0/24
      end note
    }
  }
}

' Replication back to the primary region's Postgres over private peering.
frame "USEAST Primary Region " as primary_region{
  component "Postgres" as pgbackup
  [Postgres] --> pgbackup : "Private Peering / TLS:5432"
}

note right of [ALB] : Azure Load Balancer restricted to AKS only
@enduml

View File

@ -1,10 +0,0 @@
# Managed identity granted Reader + Managed Identity Operator so workloads
# can read secrets out of Key Vault without embedded credentials.
module "keyvault_reader_identity" {
  source = "../../modules/managed_identity"

  # Deployment identity
  name        = var.name
  environment = var.environment
  owner       = var.owner
  region      = var.region

  # Identity name is derived from the deployment so environments don't collide.
  identity = "${var.name}-${var.environment}-vault-reader"
  roles    = ["Reader", "Managed Identity Operator"]
}

View File

@ -1,43 +0,0 @@
# Service-principal credentials for the AKS cluster are read out of the
# operator keyvault at plan time rather than stored in variables/state files.
data "azurerm_key_vault_secret" "k8s_client_id" {
  name         = "k8s-client-id"
  key_vault_id = module.operator_keyvault.id
}

data "azurerm_key_vault_secret" "k8s_client_secret" {
  name         = "k8s-client-secret"
  key_vault_id = module.operator_keyvault.id
}

# AKS cluster joined to the VPC, autoscaling between 3 and 5 nodes, with
# diagnostics routed to the shared Log Analytics workspace.
module "k8s" {
  source              = "../../modules/k8s"
  region              = var.region
  name                = var.name
  environment         = var.environment
  owner               = var.owner
  k8s_dns_prefix      = var.k8s_dns_prefix
  k8s_node_size       = var.k8s_node_size
  vnet_subnet_id      = module.vpc.subnets #FIXME - output from module.vpc.subnets should be map
  enable_auto_scaling = true
  max_count           = 5
  min_count           = 3
  client_id           = data.azurerm_key_vault_secret.k8s_client_id.value
  client_secret       = data.azurerm_key_vault_secret.k8s_client_secret.value
  workspace_id        = module.logs.workspace_id
  vnet_id             = module.vpc.id
}

# Load balancer modules below are intentionally disabled; kept for reference
# until the LB story is settled.
#module "main_lb" {
#  source      = "../../modules/lb"
#  region      = var.region
#  name        = "main-${var.name}"
#  environment = var.environment
#  owner       = var.owner
#}

#module "auth_lb" {
#  source      = "../../modules/lb"
#  region      = var.region
#  name        = "auth-${var.name}"
#  environment = var.environment
#  owner       = var.owner
#}

View File

@ -1,15 +0,0 @@
# Application Key Vault ("cz"). Deny-by-default network policy with subnet
# and admin-IP exceptions; admin users get management access.
# NOTE(review): principal_id is a hard-coded object ID -- presumably the
# keyvault reader identity's principal; consider wiring it from
# module.keyvault_reader_identity instead of a literal. Confirm whose
# principal this is before changing it.
module "keyvault" {
  source           = "../../modules/keyvault"
  name             = "cz"
  region           = var.region
  owner            = var.owner
  environment      = var.environment
  tenant_id        = var.tenant_id
  principal_id     = "f9bcbe58-8b73-4957-aee2-133dc3e58063"
  admin_principals = var.admin_users
  policy           = "Deny"
  subnet_ids       = [module.vpc.subnets]
  whitelist        = var.admin_user_whitelist
  workspace_id     = module.logs.workspace_id
}

View File

@ -1,8 +0,0 @@
# Shared Log Analytics workspace; the other modules in this environment send
# their diagnostics here via its workspace_id output.
module "logs" {
  source = "../../modules/log_analytics"

  name        = var.name
  environment = var.environment
  owner       = var.owner
  region      = var.region
}

View File

@ -1,21 +0,0 @@
# Root Postgres credentials are read from the operator keyvault at plan time
# so they never appear in variables or checked-in configuration.
data "azurerm_key_vault_secret" "postgres_username" {
  name         = "postgres-root-user"
  key_vault_id = module.operator_keyvault.id
}

data "azurerm_key_vault_secret" "postgres_password" {
  name         = "postgres-root-password"
  key_vault_id = module.operator_keyvault.id
}

# Managed Postgres on the private subnet, logging to the shared workspace.
module "sql" {
  source                       = "../../modules/postgres"
  name                         = var.name
  owner                        = var.owner
  environment                  = var.environment
  region                       = var.region
  subnet_id                    = module.vpc.subnet_list["private"].id
  administrator_login          = data.azurerm_key_vault_secret.postgres_username.value
  administrator_login_password = data.azurerm_key_vault_secret.postgres_password.value
  workspace_id                 = module.logs.workspace_id
}

View File

@ -1,17 +0,0 @@
# Providers are pinned to exact versions so plans are reproducible across
# operator machines.
provider "azurerm" {
  version = "=1.40.0"
}

provider "azuread" {
  # Whilst version is optional, we /strongly recommend/ using it to pin the version of the Provider being used
  version = "=0.7.0"
}

# Remote state lives in the IP-whitelisted jedidevtfstate storage account
# (see the tf_state bucket module for the bootstrap/access caveat).
terraform {
  backend "azurerm" {
    resource_group_name  = "cloudzero-jedidev-jedidevtfstate"
    storage_account_name = "jedidevtfstate"
    container_name       = "tfstate"
    key                  = "dev.terraform.tfstate"
  }
}

View File

@ -1,11 +0,0 @@
# Premium-tier Azure Cache for Redis on its dedicated subnet, with
# diagnostics routed to the shared Log Analytics workspace.
module "redis" {
  source = "../../modules/redis"

  # Deployment identity
  name        = var.name
  environment = var.environment
  owner       = var.owner
  region      = var.region

  # Placement and tier
  subnet_id = module.vpc.subnet_list["redis"].id
  sku_name  = "Premium"
  family    = "P"

  workspace_id = module.logs.workspace_id
}

View File

@ -1,14 +0,0 @@
# Operator Key Vault ("ops") holding bootstrap secrets (k8s service
# principal, Postgres root credentials). Deny-by-default network policy with
# subnet and admin-IP exceptions.
# NOTE(review): principal_id is the empty string here while the app keyvault
# passes a concrete object ID -- presumably no reader identity needs access
# to this vault; confirm the keyvault module accepts "" as "no principal".
module "operator_keyvault" {
  source           = "../../modules/keyvault"
  name             = "ops"
  region           = var.region
  owner            = var.owner
  environment      = var.environment
  tenant_id        = var.tenant_id
  principal_id     = ""
  admin_principals = var.admin_users
  policy           = "Deny"
  subnet_ids       = [module.vpc.subnets]
  whitelist        = var.admin_user_whitelist
  workspace_id     = module.logs.workspace_id
}

View File

@ -1,111 +0,0 @@
# ---------------------------------------------------------------------------
# Deployment identity
# ---------------------------------------------------------------------------

variable "environment" {
  default = "jedidev"
}

variable "region" {
  default = "eastus"
}

# Region used for geo-redundant replicas (container registry, Postgres backup).
variable "backup_region" {
  default = "westus2"
}

variable "owner" {
  default = "dev"
}

variable "name" {
  default = "cloudzero"
}

# ---------------------------------------------------------------------------
# Networking
# ---------------------------------------------------------------------------

variable "virtual_network" {
  type    = string
  default = "10.1.0.0/16"
}

# Subnets, keyed by name. Each value is "CIDR,route-table" (the route table
# name must exist in var.route_tables).
variable "networks" {
  type = map
  default = {
    # format: name = "CIDR, route table, Security Group Name"
    public  = "10.1.1.0/24,public"  # LBs
    private = "10.1.2.0/24,private" # k8s, postgres, keyvault
    apps    = "10.1.4.0/24,private" # application workloads (was mislabeled "Redis")
    redis   = "10.1.3.0/24,private" # Redis
  }
}

# Azure service endpoints enabled per subnet (comma-separated).
variable "service_endpoints" {
  type = map
  default = {
    public  = "Microsoft.ContainerRegistry" # Not necessary but added to avoid infinite state loop
    private = "Microsoft.Storage,Microsoft.KeyVault,Microsoft.ContainerRegistry,Microsoft.Sql"
    redis   = "Microsoft.Storage,Microsoft.Sql" # FIXME: There is no Microsoft.Redis
    apps    = "Microsoft.Storage,Microsoft.KeyVault,Microsoft.ContainerRegistry,Microsoft.Sql"
  }
}

variable "route_tables" {
  description = "Route tables and their default routes"
  type        = map
  default = {
    public  = "Internet"
    private = "Internet" # TODO: Switch to FW
    redis   = "VnetLocal"
    apps    = "Internet" # TODO: Switch to FW
  }
}

variable "dns_servers" {
  type    = list
  default = []
}

# ---------------------------------------------------------------------------
# Kubernetes
# ---------------------------------------------------------------------------

variable "k8s_node_size" {
  type    = string
  default = "Standard_A1_v2"
}

variable "k8s_dns_prefix" {
  type    = string
  default = "atat"
}

# ---------------------------------------------------------------------------
# Access control
# NOTE(review): tenant ID, admin object IDs, and whitelist IPs are
# hard-coded for the jedidev environment; the storage whitelist uses bare
# IPs while the admin whitelist uses /32 CIDRs -- presumably the two modules
# expect different formats; confirm before unifying.
# ---------------------------------------------------------------------------

variable "tenant_id" {
  type    = string
  default = "47f616e9-6ff5-4736-9b9e-b3f62c93a915"
}

# Azure AD object IDs granted keyvault admin access, keyed by display name.
variable "admin_users" {
  type = map
  default = {
    "Rob Gil"      = "cef37d01-1acf-4085-96c8-da9d34d0237e"
    "Dan Corrigan" = "7e852ceb-eb0d-49b1-b71e-e9dcd1082ffc"
  }
}

# Admin source addresses (CIDR form) allowed through deny-by-default policies.
variable "admin_user_whitelist" {
  type = map
  default = {
    "Rob Gil"           = "66.220.238.246/32"
    "Dan Corrigan Work" = "108.16.207.173/32"
    "Dan Corrigan Home" = "71.162.221.27/32"
  }
}

# Storage-admin source addresses (bare IPs) for the bucket modules.
variable "storage_admin_whitelist" {
  type = map
  default = {
    "Rob Gil"           = "66.220.238.246"
    "Dan Corrigan Work" = "108.16.207.173"
    "Dan Corrigan Home" = "71.162.221.27"
  }
}

# Address pool handed to VPN clients.
variable "vpn_client_cidr" {
  type    = list
  default = ["172.16.255.0/24"]
}

Some files were not shown because too many files have changed in this diff Show More