From 97df63857699357c34cbf81cd991301c3c44cc3c Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Mon, 3 Oct 2022 17:34:51 -0400 Subject: [PATCH 01/29] Doc/update terraform variables (#2009) * Updated most Terraform modules' variables files to the 1.0 specification, and improved inline documentation and comments. * Added more documentation around Terraform variables. * Added sample.tfvars for AWS modules, and provided documentation on most variables. The biggest gap currently is ami_account_id and csoc_account_id, which don't have good WHY documentation. Co-authored-by: jawadqur <55899496+jawadqur@users.noreply.github.com> --- .secrets.baseline | 22 +- tf_files/aws/access/sample.tfvars | 5 + tf_files/aws/account-policies/sample.tfvars | 7 +- .../aws/account_management-logs/sample.tfvars | 9 + tf_files/aws/batch/sample.tfvars | 67 ++++ .../aws/bucket_manifest_utils/sample.tfvars | 44 +++ tf_files/aws/cognito/sample.tfvars | 52 +++- tf_files/aws/commons/sample.tfvars | 288 ++++++++++++++++++ tf_files/aws/commons_sns/sample.tfvars | 12 + tf_files/aws/commons_vpc_es/sample.tfvars | 32 ++ tf_files/aws/commons_vpc_es/variables.tf | 1 + tf_files/aws/csoc_admin_vm/sample.tfvars | 37 +++ tf_files/aws/csoc_admin_vm/variables.tf | 1 + .../aws/csoc_common_logging/sample.tfvars | 35 +++ .../aws/csoc_management-logs/sample.tfvars | 12 + .../aws/csoc_management-logs/variables.tf | 1 - tf_files/aws/csoc_qualys_vm/sample.tfvars | 43 +++ tf_files/aws/csoc_qualys_vm/variables.tf | 1 + tf_files/aws/data_bucket/sample.tfvars | 13 + tf_files/aws/data_bucket/variables.tf | 2 + tf_files/aws/data_bucket_queue/sample.tfvars | 1 + tf_files/aws/demolab/sample.tfvars | 16 + tf_files/aws/eks/sample.tfvars | 129 ++++++++ tf_files/aws/eks/variables.tf | 5 +- tf_files/aws/encrypted-rds/sample.tfvars | 212 ++++++++++++- tf_files/aws/kubecost/sample.tfvars | 16 +- tf_files/aws/publicvm/sample.tfvars | 31 +- tf_files/aws/publicvm/variables.tf | 1 - tf_files/aws/rds/sample.tfvars | 210 +++++++++---- tf_files/aws/rds/variables.tf | 4 +- tf_files/aws/rds_snapshot/sample.tfvars | 17 ++ tf_files/aws/rds_snapshot/variables.tf | 3 + tf_files/aws/role/sample.tfvars | 23 +- .../aws/role_policy_attachment/sample.tfvars | 8 + tf_files/aws/sftp/sample.tfvars | 6 + tf_files/aws/slurm/sample.tfvars | 2 +- tf_files/aws/sqs/sample.tfvars | 8 + tf_files/aws/sqs/variables.tf | 1 + tf_files/aws/squid_auto/sample.tfvars | 96 ++++++ tf_files/aws/squid_auto/variables.tf | 7 +- tf_files/aws/squid_nlb_central/sample.tfvars | 45 +++ tf_files/aws/squid_nlb_central/variables.tf | 12 +- tf_files/aws/squid_vm/sample.tfvars | 25 ++ tf_files/aws/squid_vm/variables.tf | 3 + .../aws/squidnlb_standalone/sample.tfvars | 45 +++ tf_files/aws/squidnlb_standalone/variables.tf | 9 +- tf_files/aws/storage-gateway/sample.tfvars | 22 ++ tf_files/aws/storage-gateway/variables.tf | 2 - tf_files/aws/user_generic/sample.tfvars | 5 + tf_files/aws/user_vpc/sample.tfvars | 24 ++ tf_files/aws/utility_admin/sample.tfvars | 69 ++++- tf_files/aws/utility_admin/variables.tf | 2 + tf_files/aws/utility_vm/sample.tfvars | 85 ++++++ tf_files/aws/utility_vm/variables.tf | 2 + tf_files/aws/vpn_nlb_central/sample.tfvars | 63 ++++ tf_files/aws/vpn_nlb_central/variables.tf | 18 +- 56 files changed, 1771 insertions(+), 140 deletions(-) create mode 100644 tf_files/aws/access/sample.tfvars create mode 100644 tf_files/aws/account_management-logs/sample.tfvars create mode 100644 tf_files/aws/batch/sample.tfvars create mode 100644 
tf_files/aws/bucket_manifest_utils/sample.tfvars create mode 100644 tf_files/aws/commons/sample.tfvars create mode 100644 tf_files/aws/commons_sns/sample.tfvars create mode 100644 tf_files/aws/commons_vpc_es/sample.tfvars create mode 100644 tf_files/aws/csoc_admin_vm/sample.tfvars create mode 100644 tf_files/aws/csoc_common_logging/sample.tfvars create mode 100644 tf_files/aws/csoc_management-logs/sample.tfvars create mode 100644 tf_files/aws/csoc_qualys_vm/sample.tfvars create mode 100644 tf_files/aws/data_bucket/sample.tfvars create mode 100644 tf_files/aws/demolab/sample.tfvars create mode 100644 tf_files/aws/eks/sample.tfvars create mode 100644 tf_files/aws/rds_snapshot/sample.tfvars create mode 100644 tf_files/aws/role_policy_attachment/sample.tfvars create mode 100644 tf_files/aws/sqs/sample.tfvars create mode 100644 tf_files/aws/squid_auto/sample.tfvars create mode 100644 tf_files/aws/squid_nlb_central/sample.tfvars create mode 100644 tf_files/aws/squid_vm/sample.tfvars create mode 100644 tf_files/aws/squidnlb_standalone/sample.tfvars create mode 100644 tf_files/aws/storage-gateway/sample.tfvars create mode 100644 tf_files/aws/user_generic/sample.tfvars create mode 100644 tf_files/aws/user_vpc/sample.tfvars create mode 100644 tf_files/aws/utility_vm/sample.tfvars create mode 100644 tf_files/aws/vpn_nlb_central/sample.tfvars diff --git a/.secrets.baseline b/.secrets.baseline index 8ede85939..7a459b129 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2022-06-21T21:12:27Z", + "generated_at": "2022-07-29T15:31:31Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -2235,12 +2235,21 @@ "type": "Secret Keyword" } ], + "tf_files/aws/eks/sample.tfvars": [ + { + "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", + "is_secret": false, + "is_verified": false, + "line_number": 107, + "type": "Hex High Entropy String" + } + ], "tf_files/aws/eks/variables.tf": [ { "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", "is_secret": false, "is_verified": false, - "line_number": 135, + "line_number": 133, "type": "Hex High Entropy String" } ], @@ -2412,15 +2421,6 @@ "type": "Hex High Entropy String" } ], - "tf_files/aws/rds/sample.tfvars": [ - { - "hashed_secret": "76c3c4836dee37d8d0642949f84092a9a24bbf46", - "is_secret": false, - "is_verified": false, - "line_number": 7, - "type": "Secret Keyword" - } - ], "tf_files/aws/slurm/README.md": [ { "hashed_secret": "fd85d792fa56981cf6a8d2a5c0857c74af86e99d", diff --git a/tf_files/aws/access/sample.tfvars b/tf_files/aws/access/sample.tfvars new file mode 100644 index 000000000..5e7b9b853 --- /dev/null +++ b/tf_files/aws/access/sample.tfvars @@ -0,0 +1,5 @@ +#The URL to an S3 bucket we want to work with +access_url = "" + +#The ARN to an Amazon ACM-managed certificate +access_cert = "" \ No newline at end of file diff --git a/tf_files/aws/account-policies/sample.tfvars b/tf_files/aws/account-policies/sample.tfvars index 7a6d09a0d..2147c1e2e 100644 --- a/tf_files/aws/account-policies/sample.tfvars +++ b/tf_files/aws/account-policies/sample.tfvars @@ -1 +1,6 @@ -# defaults shold usually be ok - check variables.tf +#The AWS region we are working in +region = "us-east-1" + + +#The IAM roles to be created +roles = ["devopsdirector", "bsdisocyber", "projectmanagerplanx", "devopsplanx", "devplanx"] \ No newline at end of file diff --git a/tf_files/aws/account_management-logs/sample.tfvars b/tf_files/aws/account_management-logs/sample.tfvars new file mode 100644 
index 000000000..8b6cd3bd9 --- /dev/null +++ b/tf_files/aws/account_management-logs/sample.tfvars @@ -0,0 +1,9 @@ +#ID of AWS account that owns the public AMIs +#TODO clarification +csoc_account_id = "433568766270" + +#TODO check what these are used for. This module seems to use csoc_common_logging, +#which seems to use modules/common-logging. Neither of those appear to have these two +account_name = "" + +alarm_actions = "" diff --git a/tf_files/aws/batch/sample.tfvars b/tf_files/aws/batch/sample.tfvars new file mode 100644 index 000000000..a129bf0fa --- /dev/null +++ b/tf_files/aws/batch/sample.tfvars @@ -0,0 +1,67 @@ +#A tag used to identify resources associated with this job. +job_id = "" + +#This is a prefix that will be applied to resources generated as part of this deployment. It is for tracking purposes. +#This is generally the long name of the job, which is the hostname + job type + job ID. +prefix = "" + +#The name of the AWS Batch job definition +batch_job_definition_name = "" + +#This is the location of a JSON file that contains an AWS Batch job definition, containing information such as +#the name of the container to use and the resources to allocate (see the illustrative sketch at the end of this file). +#More information can be found here: https://docs.aws.amazon.com/batch/latest/userguide/job_definitions.html +container_properties = "" + +#The name of the IAM instance role to be attached to the machines running this batch job. An instance role is a limited role +#applied to EC2 instances to allow them to access designated resources. +#More information can be found at: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html +iam_instance_role = "" + +#The instance profile to attach to EC2 machines. The instance profile is associated with a role, and is the +#resource that is associated with a specific EC2 instance to give it access to desired resources. More information can be +#found at: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html +iam_instance_profile_role = "" + +#The role that allows AWS Batch itself (not the EC2 instances) to access needed resources. More information can be found at: +#https://docs.aws.amazon.com/batch/latest/userguide/service_IAM_role.html +aws_batch_service_role = "" + +#The name of the security group associated with this batch job +aws_batch_compute_environment_sg = "" + +#The name of the batch compute environment to run the jobs in. A compute environment consists of ECS container instances that can +#run the job. +compute_environment_name = "" + +#What type of EC2 instance to use in order to handle the job. +instance_type = ["c4.large"] + +priority = 10 + +#The maximum number of EC2 vCPUs that an environment can use. +max_vcpus = 256 + +#The minimum number of EC2 vCPUs that an environment should maintain. +min_vcpus = 0 + +#What type of compute environment to use. Valid selections are [EC2, SPOT] +compute_env_type = "EC2" + +#Valid options are [MANAGED, UNMANAGED] +#This controls whether AWS manages spinning up the resources for us, or if we bring our own environment. +#DO NOT USE UNMANAGED unless you know what you're doing. +compute_type = "MANAGED" + +#The EC2 key pair that is used for instances launched in the compute environment. +ec2_key_pair = "giangb" + +#The name of the job queue to create as part of this deployment. +batch_job_queue_name = "" + +#The name of the SQS queue that will be created as a part of this deployment. The queue is the primary way that different nodes +#communicate that they have completed a part of the batch job, and pass their completed parts to the next stage of the pipeline +sqs_queue_name = "" + +#The name of the bucket the results should be output to. +output_bucket_name = ""
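+
+#For illustration only (not generated from variables.tf): the JSON file referenced by container_properties
+#above is a standard AWS Batch container-properties document. A minimal sketch, using a placeholder image
+#name, command, and resource sizes, might look like:
+#  {
+#    "image": "quay.io/example-org/example-batch-image:latest",
+#    "vcpus": 2,
+#    "memory": 2048,
+#    "command": ["process_chunk.sh"]
+#  }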
diff --git a/tf_files/aws/bucket_manifest_utils/sample.tfvars b/tf_files/aws/bucket_manifest_utils/sample.tfvars new file mode 100644 index 000000000..63d5e434f --- /dev/null +++ b/tf_files/aws/bucket_manifest_utils/sample.tfvars @@ -0,0 +1,44 @@ +#Path to the function file +lambda_function_file = "" + +#Name of the function you are creating +lambda_function_name = "" + +#Description of the function +lambda_function_description = "" + +#IAM role ARN to attach to the function +lambda_function_iam_role_arn = "" + +#The handler (file.method entrypoint) within the function code that AWS Lambda calls to handle the task. +#For a Python-focused example, see here: https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html +lambda_function_handler = "lambda_function.handler" + +#Language and version to use to run the lambda function. +#For more information, see: https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html +lambda_function_runtime = "python3.7" + + +#Timeout of the function in seconds +lambda_function_timeout = 3 + +#How much RAM in MB will be used +lambda_function_memory_size = 128 + +#A map containing key-value pairs that define environment variables for the function +lambda_function_env = {} + +#A map containing key-value pairs used in AWS to filter and search for resources +lambda_function_tags = {} + +#Whether the function will be attached to a VPC. Valid options are [true, false] +lambda_function_with_vpc = false + +#List of security groups for the lambda function with a vpc +lambda_function_security_groups = [] + +#List of subnets for the lambda function with a vpc +lambda_function_subnets_id = [] + + + diff --git a/tf_files/aws/cognito/sample.tfvars b/tf_files/aws/cognito/sample.tfvars index 05ebe2548..bf480e475 100644 --- a/tf_files/aws/cognito/sample.tfvars +++ b/tf_files/aws/cognito/sample.tfvars @@ -1,10 +1,44 @@ -vpc_name = "INSERT VPC NAME HERE" -cognito_provider_name = "federation name" -cognito_domain_name = "subname for .auth.us-east-1.amazoncognito.com" -cognito_callback_urls = ["https://url1"] -cognito_provider_details = {"MetadataURL"="https://someurl"} -tags = { - "Organization" = "PlanX" - "Environment" = "CSOC" -} +#A list of allowed OAuth Flows +cognito_oauth_flows = ["code", "implicit"] + +#A user directory for Amazon Cognito, which handles sign-on for users. This is generally given the same name as the +#app using the service. +cognito_user_pool_name = "fence" + +#The identity provider types that Cognito will use. An identity provider is a service that stores and manages +#identities. See: https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateIdentityProvider.html#CognitoUserPools-CreateIdentityProvider-request-ProviderType +cognito_provider_type = "SAML" + +#The attribute mapping is how Cognito translates the information about a user received from an identity provider into +#the attributes that Cognito expects from a user.
+#For more information, see: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-specifying-attribute-mapping.html +cognito_attribute_mapping = { + "email" = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress" + } + +#The OAuth scopes specify what information from a user's account Cognito is able to access. Scopes are provider-specific, and +#you will need to consult the documentation for your identity provider to determine what scopes are necessary and valid +cognito_oauth_scopes = ["email", "openid"] + +#Details about the auth provider, for this module most likely the MetadataURL or MetadataFILE +cognito_provider_details = {} + +#The name of the VPC that the Cognito pool will be created in +vpc_name = "" + +#The address of the sign-in and sign-up pages +cognito_domain_name = "" + +#The URL(s) that can be redirected to after a successful sign-in +cognito_callback_urls = [] + +#The name of the identity provider. This is the name used within AWS +cognito_provider_name = "" + +#A map containing key-value pairs used in AWS to filter and search for resources +tags = { + "Organization" = "PlanX" + "Environment" = "CSOC" + } + diff --git a/tf_files/aws/commons/sample.tfvars b/tf_files/aws/commons/sample.tfvars new file mode 100644 index 000000000..b73e57a6c --- /dev/null +++ b/tf_files/aws/commons/sample.tfvars @@ -0,0 +1,288 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-28 12:08:31.473975 + +#The name of the VPC for this commons +vpc_name = "Commons1" + +#The CIDR block to allocate to the VPC for this commons +vpc_cidr_block = "172.24.17.0/20" + +#A secondary CIDR block to allocate to the VPC for this commons, in case of network expansion +secondary_cidr_block = false + +#The type(s) of traffic covered by flow logs +vpc_flow_traffic = "ALL" + +#The region to bring up this commons in +aws_region = "us-east-1" + +#An AWS ARN for the certificate to use on the Load Balancer in front of the commons. Because all access to a commons is through HTTPS, this is required +aws_cert_name = "AWS-CERTIFICATE-NAME" + +# +#TODO Figure out how to explain this +csoc_account_id = "433568766270" + +#The CIDR of the VPC from which the commands to bring up this commons are being run; this will enable access +peering_cidr = "10.128.0.0/20" + +#The size of the fence DB, in GiB +fence_db_size = 10 + +#The size of the sheepdog DB, in GiB +sheepdog_db_size = 10 + +#The size of the indexd DB, in GiB +indexd_db_size = 10 + +#The password for the fence DB +db_password_fence= "" + +#The password for the gdcapi DB +db_password_gdcapi = "" + +#This indexd guid prefix should come from Trevar/ZAC +indexd_prefix = "dg.XXXX/" + +#The password for the peregrine DB +db_password_peregrine= "" + +#The password for the sheepdog DB +db_password_sheepdog= "" + +#The password for the indexd DB +db_password_indexd= "" + +#The URL for the data dictionary schema. It must be in JSON format.
For more info, see: https://gen3.org/resources/user/dictionary/ +dictionary_url= "" + +#A configuration to specify a customization profile for the commons' front-end +portal_app = "dev" + +#If you wish to start fence pre-populated with data, this is the RDS snapshot that fence will start off of +fence_snapshot = "" + +#If you wish to start gdcapi pre-populated with data, this is the RDS snapshot that gdcapi will start off of +gdcapi_snapshot = "" + +#If you wish to start peregrine pre-populated with data, this is the RDS snapshot that peregrine will start off of +peregrine_snapshot = "" + +#If you wish to start sheepdog pre-populated with data, this is the RDS snapshot that it will start off of +sheepdog_snapshot = "" + +#If you wish to start indexd pre-populated with data, this is the RDS snapshot that it will start off of +indexd_snapshot = "" + +#Instance type to use for fence. For more information on DB instance types, see: +#https://aws.amazon.com/rds/instance-types/ +fence_db_instance = "db.t3.small" + +#Instance type to use for sheepdog. For more information on DB instance types, see: +#https://aws.amazon.com/rds/instance-types/ +sheepdog_db_instance = "db.t3.small" + +#Instance type to use for indexd. For more information on DB instance types, see: +#https://aws.amazon.com/rds/instance-types/ +indexd_db_instance = "db.t3.small" + +#Hostname that the commons will use for access; i.e. the URL that people will use to access the commons over the internet +hostname = "dev.bionimbus.org" + +#A list of SSH keys that will be added to compute resources deployed by this module, including Squid proxy instances +kube_ssh_key= "" + +#Google client ID for authentication purposes. If you don't want to enable Google sign in, leave blank +google_client_id= "" + +#Secret for the above client ID. Set this to blank as well if you do not want Google sign in +google_client_secret= "" + +#GDCAPI secret key +gdcapi_secret_key= "" + +#Search criteria for squid AMI lookup +squid_image_search_criteria = "ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*" + +#The ID of the VPC that the commands to bring this commons up are run in, for access purposes +peering_vpc_id = "vpc-e2b51d99" + +#The name of the NLB service endpoint for Squid +squid-nlb-endpointservice-name = "com.amazonaws.vpce.us-east-1.vpce-svc-0ce2261f708539011" + +#A webhook used to send alerts in a Slack channel https://api.slack.com/messaging/webhooks +slack_webhook = "" + +#A webhook used to send alerts in a secondary Slack channel https://api.slack.com/messaging/webhooks +secondary_slack_webhook = "" + +#Threshold for database storage utilization.
Represents a percentage, if this limit is reached, the Slack webhooks are used to send an alert +alarm_threshold = "85" + +#The name of the organization, for tagging the resources for easier tracking +organization_name = "Basic Service" + +#NOT CURRENTLY IN USE +mailgun_smtp_host = "smtp.mailgun.org" + +#NOT CURRENTLY IN USE +mailgun_api_url = "https://api.mailgun.net/v3/" + +#Whether or not fence should be deployed in a highly-available configuration +fence_ha = false + +#Whether or not sheepdog should be deployed in a highly-available configuration +sheepdog_ha = false + +#Whether or not indexd should be deployed in a highly-available configuration +indexd_ha = false + +#A maintenance window for fence +fence_maintenance_window = "SAT:09:00-SAT:09:59" + +#A maintenance window for sheepdog +sheepdog_maintenance_window = "SAT:10:00-SAT:10:59" + +#A maintenance window for indexd +indexd_maintenance_window = "SAT:11:00-SAT:11:59" + +#How many snapshots should be kept for fence +fence_backup_retention_period = "4" + +#How many snapshots should be kept for sheepdog +sheepdog_backup_retention_period = "4" + +#How many snapshots should be kept for indexd +indexd_backup_retention_period = "4" + +#A backup window for fence +fence_backup_window = "06:00-06:59" + +#A backup window for sheepdog +sheepdog_backup_window = "07:00-07:59" + +#A backup window for indexd +indexd_backup_window = "08:00-08:59" + +#The version of the fence engine to run (by default postgres) +fence_engine_version = "13.3" + +#The version of the sheepdog engine to run +sheepdog_engine_version = "13.3" + +#The version of the indexd engine to run +indexd_engine_version = "13.3" + +#Whether or not to enable automatic upgrades of minor versions for fence +fence_auto_minor_version_upgrade = "true" + +#Whether or not to enable automatic upgrades of minor versions for indexd +indexd_auto_minor_version_upgrade = "true" + +#Whether or not to enable automatic upgrades of minor versions for sheepdog +sheepdog_auto_minor_version_upgrade = "true" + +#Bucket name from which to pull users.yaml for permissions +users_bucket_name = "cdis-gen3-users" + +#Name of fence database. Not the same as instance identifier +fence_database_name = "fence" + +#Name of sheepdog database. Not the same as instance identifier +sheepdog_database_name = "gdcapi" + +#Name of indexd database. Not the same as instance identifier +indexd_database_name = "indexd" + +#Username for fence DB +fence_db_username = "fence_user" + +#Username for sheepdog DB +sheepdog_db_username = "sheepdog" + +#Username for indexd DB +indexd_db_username = "indexd_user" + +#Whether or not fence can automatically upgrade major versions +fence_allow_major_version_upgrade = "true" + +#Whether or not sheepdog can automatically upgrade major versions +sheepdog_allow_major_version_upgrade = "true" + +#Whether or not indexd can automatically upgrade major versions +indexd_allow_major_version_upgrade = "true" + +#Instance type for HA squid +ha-squid_instance_type = "t3.medium" + +#Volume size for HA squid instances +ha-squid_instance_drive_size = 8 + +#Bootstrap script for ha-squid instances +ha-squid_bootstrap_script = "squid_running_on_docker.sh" + +#Additional variables to pass along with the bootstrap script +ha-squid_extra_vars = ["squid_image=master"] + +#For testing purposes, when using something other than the master branch +branch = "master" + +#When fence bot has to access another bucket that wasn't created by the VPC module +fence-bot_bucket_access_arns = [] + +#Should you want to deploy HA-squid +deploy_ha_squid = false + +#If ha squid is enabled and you want to set your own capacity +ha-squid_cluster_desired_capasity = 2 + +#If ha squid is enabled and you want to set your own min size +ha-squid_cluster_min_size = 1 + +#If ha squid is enabled and you want to set your own max size +ha-squid_cluster_max_size = 3 + +#Whether or not to deploy the database instance +deploy_sheepdog_db = true + +#Whether or not to deploy the database instance +deploy_fence_db = true + +#Whether or not to deploy the database instance +deploy_indexd_db = true + +#Engine to deploy the db instance +sheepdog_engine = "postgres" + +#Engine to deploy the db instance +fence_engine = "postgres" + +#Engine to deploy the db instance +indexd_engine = "postgres" + +#Instance type for the single proxy instance +single_squid_instance_type = "t2.micro" + +#Let k8s workers be on a /22 subnet per AZ +network_expansion = false + +#Whether or not the storage for the RDS instances should be encrypted +rds_instance_storage_encrypted = true + +#Maximum allocated storage for autoscaling +fence_max_allocated_storage = 0 + +#Maximum allocated storage for autoscaling +sheepdog_max_allocated_storage = 0 + +#Maximum allocated storage for autoscaling +indexd_max_allocated_storage = 0 + +#Used to authenticate with Qualys, which is used for security scanning. Optional +activation_id = "" + +#Used to authenticate with Qualys as well. Also optional +customer_id = "" + +#Whether or not to set up the commons in accordance with FIPS, a federal information standard +fips = false +
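+
+#Illustrative usage note (not generated from variables.tf): in cloud-automation these values are normally
+#filled in and applied for you by the gen3 CLI, but if you are driving Terraform by hand against this module
+#you would copy this file, fill in the blanks, and point Terraform at it, for example:
+#  cp sample.tfvars config.tfvars
+#  terraform plan -var-file=config.tfvars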
diff --git a/tf_files/aws/commons_sns/sample.tfvars b/tf_files/aws/commons_sns/sample.tfvars new file mode 100644 index 000000000..c56256579 --- /dev/null +++ b/tf_files/aws/commons_sns/sample.tfvars @@ -0,0 +1,12 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 10:48:11.054601 + +#The type of cluster that the jobs are running in.
kube-aws is deprecated, so it should mostly be EKS clusters +#Acceptable values are: "EKS", "kube-aws" +cluster_type = "EKS" + +#The email addresses that notifications from this instance should be sent to +emails = ["someone@uchicago.edu","otherone@uchicago.edu"] + +#The subject of the emails sent to the addresses enumerated previously +topic_display = "cronjob manitor" + diff --git a/tf_files/aws/commons_vpc_es/sample.tfvars b/tf_files/aws/commons_vpc_es/sample.tfvars new file mode 100644 index 000000000..cc601d123 --- /dev/null +++ b/tf_files/aws/commons_vpc_es/sample.tfvars @@ -0,0 +1,32 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 11:33:44.445657 + +#Slack webhook to send alerts to a Slack channel. Slack webhooks are deprecated, so this may need to change at some point +#See: https://api.slack.com/legacy/custom-integrations/messaging/webhooks +slack_webhook = "" + +#A Slack webhook to send alerts to a secondary channel +secondary_slack_webhook = "" + +#The instance type for ElasticSearch. More information on instance types can be found here: +#https://docs.aws.amazon.com/opensearch-service/latest/developerguide/supported-instance-types.html +instance_type = "m4.large.elasticsearch" + +#The size of the attached Elastic Block Store volume, in GB +ebs_volume_size_gb = 20 + +#Boolean to control whether or not this cluster should be encrypted +encryption = "true" + +#How many instances to have in this ElasticSearch cluster +instance_count = 3 + +#For tagging purposes +organization_name = "Basic Service" + +#What version to use when deploying ES +es_version = "6.8" + +#Whether or not to deploy a linked role for ES. A linked role is a role that allows for easier management of ES, by automatically +#granting it the access it needs. For more information, see: https://docs.aws.amazon.com/opensearch-service/latest/developerguide/slr.html +es_linked_role = true + diff --git a/tf_files/aws/commons_vpc_es/variables.tf b/tf_files/aws/commons_vpc_es/variables.tf index 85f035213..b6e41cf03 100644 --- a/tf_files/aws/commons_vpc_es/variables.tf +++ b/tf_files/aws/commons_vpc_es/variables.tf @@ -4,6 +4,7 @@ variable "vpc_name" {} variable "slack_webhook" { default = "" } + variable "secondary_slack_webhook" { default = "" } diff --git a/tf_files/aws/csoc_admin_vm/sample.tfvars b/tf_files/aws/csoc_admin_vm/sample.tfvars new file mode 100644 index 000000000..500c1a75f --- /dev/null +++ b/tf_files/aws/csoc_admin_vm/sample.tfvars @@ -0,0 +1,37 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 11:45:02.625524 + +#ID of AWS account that owns the public AMIs +#TODO Figure out what this means +ami_account_id = "707767160287" + +# +#TODO Figure out how to phrase this, I believe it's been used before +csoc_account_id = "433568766270" + +#The region in which to spin up this infrastructure. +aws_region = "us-east-1" + +#The ID of the VPC on which to bring up this VM +csoc_vpc_id = "vpc-e2b51d99" + +#The ID of the subnet on which to bring up this VM +csoc_subnet_id = "subnet-6127013c" + +#The ID of the child account. +child_account_id = "707767160287" + +#The region for the child account +child_account_region = "us-east-1" + +#NOT CURRENTLY USED +child_name = "cdistest" + +#The name of the Elastic Search cluster +elasticsearch_domain = "commons-logs" + +#A list of VPC CIDR blocks that are allowed egress from the security group created by this module +vpc_cidr_list= "" + +#The name of an AWS SSH key pair to attach to EC2 instances.
For more information, +#see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html +ssh_key_name= "" \ No newline at end of file diff --git a/tf_files/aws/csoc_admin_vm/variables.tf b/tf_files/aws/csoc_admin_vm/variables.tf index dae2f64fa..c0c846943 100644 --- a/tf_files/aws/csoc_admin_vm/variables.tf +++ b/tf_files/aws/csoc_admin_vm/variables.tf @@ -1,4 +1,5 @@ # id of AWS account that owns the public AMI's + variable "ami_account_id" { # cdis-test default = "707767160287" diff --git a/tf_files/aws/csoc_common_logging/sample.tfvars b/tf_files/aws/csoc_common_logging/sample.tfvars new file mode 100644 index 000000000..d99b428f0 --- /dev/null +++ b/tf_files/aws/csoc_common_logging/sample.tfvars @@ -0,0 +1,35 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 12:00:53.938872 + +#ID of the AWS account that owns the public AMIs +csoc_account_id = "433568766270" + +#The AWS region this infrastructure will be spun up in +aws_region = "us-east-1" + +#The child account that will be set as the owner of the resources created by this module +child_account_id = "707767160287" + +#The region in which the child account exists +child_account_region = "us-east-1" + +#The name of the environment that this will run on, for example, kidsfirst, cdistest +common_name = "cdistest" + +#The name of the Elastic Search cluster +elasticsearch_domain = "commons-logs" + +#A cutoff for how long of a response time is accepted, in milliseconds +threshold = "65.0" + +#A webhook to send alerts to a Slack channel +slack_webhook = "" + +#The ARN of a lambda function to send logs to logDNA +log_dna_function = "arn:aws:lambda:us-east-1:433568766270:function:logdna_cloudwatch" + +#Timeout threshold for the Lambda function to wait before exiting +timeout = 300 + +#Memory allocation for the Lambda function, in MB +memory_size = 512 + diff --git a/tf_files/aws/csoc_management-logs/sample.tfvars b/tf_files/aws/csoc_management-logs/sample.tfvars new file mode 100644 index 000000000..3d83cceca --- /dev/null +++ b/tf_files/aws/csoc_management-logs/sample.tfvars @@ -0,0 +1,12 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 12:15:45.245756 + +#A list of account IDs that are allowed to use the PutSubscriptionFilter action. 
For more information, see: +#https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutSubscriptionFilter.html +accounts_id = ["830067555646", "474789003679", "655886864976", "663707118480", "728066667777", "433568766270", "733512436101", "584476192960", "236835632492", "662843554732", "803291393429", "446046036926", "980870151884", "562749638216", "707767160287", "302170346065", "636151780898", "895962626746", "222487244010", "369384647397", "547481746681"] + +#The name of the Elastic Search cluster +elasticsearch_domain = "commons-logs" + +#The S3 bucket used to store logs +log_bucket_name = "management-logs-remote-accounts" + diff --git a/tf_files/aws/csoc_management-logs/variables.tf b/tf_files/aws/csoc_management-logs/variables.tf index 382240b57..93bbd1838 100644 --- a/tf_files/aws/csoc_management-logs/variables.tf +++ b/tf_files/aws/csoc_management-logs/variables.tf @@ -1,4 +1,3 @@ - variable "accounts_id" { type = "list" default = ["830067555646", "474789003679", "655886864976", "663707118480", "728066667777", "433568766270", "733512436101", "584476192960", "236835632492", "662843554732", "803291393429", "446046036926", "980870151884", "562749638216", "707767160287", "302170346065", "636151780898", "895962626746", "222487244010", "369384647397", "547481746681"] diff --git a/tf_files/aws/csoc_qualys_vm/sample.tfvars b/tf_files/aws/csoc_qualys_vm/sample.tfvars new file mode 100644 index 000000000..8c0602fec --- /dev/null +++ b/tf_files/aws/csoc_qualys_vm/sample.tfvars @@ -0,0 +1,43 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 12:32:59.347063 + +#The name to use for the Qualys VM. This field is mandatory. This VM will be used +#to run Qualys, a security application. +vm_name = "qualys_scanner_prod" + +#The ID of the VPC to spin up this VM in +vpc_id = "vpc-e2b51d99" + +#The CIDR block for the VPC subnet the VM will be placed in +env_vpc_subnet = "10.128.3.0/24" + +#Route table the VM will be associated with +qualys_pub_subnet_routetable_id = "rtb-7ee06301" + +#The name of an AWS SSH key pair to attach to EC2 instances. For more information, +#see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html +ssh_key_name = "rarya_id_rsa" + +#The code used to register with Qualys. This field is mandatory +user_perscode ="20079167409920" + +#A filter to apply against the names of AMIs when searching. We search, rather than specifying a specific image, +#to ensure that all of the latest security updates are present. +image_name_search_criteria = "a04e299c-fb8e-4ee2-9a75-94b76cf20fb2" + +#A filter to apply against the descriptions of AMIs when searching. We search, rather than specifying a specific image, +#to ensure that all of the latest security updates are present. +image_desc_search_criteria = "" + +#Account ID of the AMI owner, which is used to further filter the search for an AMI +ami_account_id = "679593333241" + +#Organization for tagging purposes +organization = "PlanX" + +#Environment for tagging purposes +environment = "CSOC" + +#The EC2 instance type to use for VM(s) spun up from this module.
For more information on EC2 instance types, see: +#https://aws.amazon.com/ec2/instance-types/ +instance_type = "t3.medium" + diff --git a/tf_files/aws/csoc_qualys_vm/variables.tf b/tf_files/aws/csoc_qualys_vm/variables.tf index 1899cf3f8..f289a9195 100644 --- a/tf_files/aws/csoc_qualys_vm/variables.tf +++ b/tf_files/aws/csoc_qualys_vm/variables.tf @@ -15,6 +15,7 @@ variable "qualys_pub_subnet_routetable_id"{ } # name of aws_key_pair ssh key to attach to VM's + variable "ssh_key_name" { default = "rarya_id_rsa" } diff --git a/tf_files/aws/data_bucket/sample.tfvars b/tf_files/aws/data_bucket/sample.tfvars new file mode 100644 index 000000000..3887b7ba9 --- /dev/null +++ b/tf_files/aws/data_bucket/sample.tfvars @@ -0,0 +1,13 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 12:55:22.764041 + +#The name of the bucket to be created +bucket_name= "" + +#Value for 'Environment' key to tag the new resources with +environment= "" + +#This variable is used to conditionally create a cloud trail. +#Using this module to create another bucket in the same "environment" with a nonzero count for this variable will +#result in an error because aspects of the cloud trail will already exist. +cloud_trail_count = "1" + diff --git a/tf_files/aws/data_bucket/variables.tf b/tf_files/aws/data_bucket/variables.tf index 22134e193..db8710a6c 100644 --- a/tf_files/aws/data_bucket/variables.tf +++ b/tf_files/aws/data_bucket/variables.tf @@ -1,7 +1,9 @@ variable "bucket_name" {} + variable "environment" { # value for 'Environment' key to tag the new resources with } + variable "cloud_trail_count" { # this variable is used to conditionally create a cloud trail # Using this module to create another bucket in the same "environment" with nonzero diff --git a/tf_files/aws/data_bucket_queue/sample.tfvars b/tf_files/aws/data_bucket_queue/sample.tfvars index ed55578f4..f2756707e 100644 --- a/tf_files/aws/data_bucket_queue/sample.tfvars +++ b/tf_files/aws/data_bucket_queue/sample.tfvars @@ -1,2 +1,3 @@ +#This bucket is required by config.tf bucket_name=WHATEVER diff --git a/tf_files/aws/demolab/sample.tfvars b/tf_files/aws/demolab/sample.tfvars new file mode 100644 index 000000000..54a885258 --- /dev/null +++ b/tf_files/aws/demolab/sample.tfvars @@ -0,0 +1,16 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 13:08:48.948730 + +#The name of the VPC this demo lab will be located on +vpc_name= "" + +#The EC2 instance type to use for VM(s) spun up from this module. For more information on EC2 instance types, see: +#https://aws.amazon.com/ec2/instance-types/ +instance_type = "t3.small" + +#The number of instances in the demo lab +instance_count = 5 + +#The name of an AWS SSH key pair to attach to EC2 instances. For more information, +#see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html +ssh_public_key= "" + diff --git a/tf_files/aws/eks/sample.tfvars b/tf_files/aws/eks/sample.tfvars new file mode 100644 index 000000000..da176e73e --- /dev/null +++ b/tf_files/aws/eks/sample.tfvars @@ -0,0 +1,129 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 13:47:23.877126 + +#The VPC this EKS cluster should be spun up +vpc_name= "" + +#The name of an AWS SSH key pair to attach to EC2 instances. For more information, +#see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html +ec2_keyname = "someone@uchicago.edu" + +#The EC2 instance type to use for VM(s) spun up from this module. 
For more information on EC2 instance types, see: +#https://aws.amazon.com/ec2/instance-types/ +instance_type = "t3.large" + +#The type of instance to use for nodes running Jupyter +jupyter_instance_type = "t3.large" + +#The type of instance to use for nodes running workflows +workflow_instance_type = "t3.2xlarge" + +#This is the CIDR of the network your adminVM is on. Since the commons creates its own VPC, you need to pair them up to allow communication between them later. +peering_cidr = "10.128.0.0/20" + +#A CIDR block, if needed to expand available addresses for workflows +secondary_cidr_block = "" + +#The ID of the VPC this cluster is to be peered with +peering_vpc_id = "vpc-e2b51d99" + +#This is the policy that was created before that allows the cluster to access the users bucket in bionimbus. +#Usually the same name as the VPC, but not always. +users_policy= "" + +#The size of the volumes for the workers, in GB +worker_drive_size = 30 + +#The EKS version this cluster should run against +eks_version = "1.16" + +#Whether you want your workers on a /24 or /23 subnet, /22 is available, but the VPC module should have been deployed +#using the `network_expansion = true` variable, otherwise workers will fail +workers_subnet_size = 24 + +#The script used to start up the workers +#https://github.com/uc-cdis/cloud-automation/tree/master/flavors/eks +bootstrap_script = "bootstrap-with-security-updates.sh" + +#The script used to start up Jupyter nodes +#https://github.com/uc-cdis/cloud-automation/tree/master/flavors/eks +jupyter_bootstrap_script = "bootstrap-with-security-updates.sh" + +#If your bootstrap script requires another kernel, you could point to it with this variable. Available kernels will be in +#the `gen3-kernels` bucket. +kernel = "N/A" + +#The size, in GB, of the drives to be attached to Jupyter workers +jupyter_worker_drive_size = 30 + +#A script used to start up a workflow +workflow_bootstrap_script = "bootstrap.sh" + +#The size, in GB, of the drives to be attached to workflow workers +workflow_worker_drive_size = 30 + +#CIDRs you want to skip the proxy when going out +cidrs_to_route_to_gw = [] + +#Organization name, for tagging purposes +organization_name = "Basic Services" + +#The desired number of Jupyter workers +jupyter_asg_desired_capacity = 0 + +#The maximum number of Jupyter workers +jupyter_asg_max_size = 10 + +#The minimum number of Jupyter workers +jupyter_asg_min_size = 0 + +#The desired number of workflow workers +workflow_asg_desired_capacity = 0 + +#The maximum number of workflow workers +workflow_asg_max_size = 50 + +#The minimum number of workflow workers +workflow_asg_min_size = 0 + +#Whether to add a service account to your cluster +iam-serviceaccount = true + +#URL for the lambda function to use to check for the proxy +domain_test = "www.google.com" + +#Is HA squid deployed? +ha_squid = false + +#Deploy workflow nodepool? +deploy_workflow = false + +#If migrating from single to ha, set to true, should not disrupt connectivity +dual_proxy = false + +#Should all Jupyter notebooks exist in the same AZ? +single_az_for_jupyter = false + +#Thumbprint for the AWS OIDC identity provider +oidc_eks_thumbprint = ["9e99a48a9960b14926bb7f3b02e22da2b0ab7280"] + +#The ARN of an SNS topic that will be used to send alerts +sns_topic_arn = "arn:aws:sns:us-east-1:433568766270:planx-csoc-alerts-topic" + +#Used for authenticating Qualys software, which is used to perform security scans +activation_id = "" + +#Used for authenticating Qualys software, which is used to perform security scans +customer_id = "" + +#This controls whether or not we use FIPS enabled AMIs +fips = false + +#The key that was used to encrypt the FIPS enabled AMI. This is needed so ASG can decrypt the AMI +fips_ami_kms = "arn:aws:kms:us-east-1:707767160287:key/mrk-697897f040ef45b0aa3cebf38a916f99" + +#This is the FIPS enabled AMI in cdistest account +fips_enabled_ami = "ami-0de87e3680dcb13ec" + +#A list of AZs to be used by EKS nodes +availability_zones = ["us-east-1a", "us-east-1c", "us-east-1d"] +
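+
+#Purely as an illustration (placeholder values, not recommendations): a small dev cluster typically only needs
+#to override the handful of values that have no usable default, for example:
+#  vpc_name     = "devplanetv1"
+#  users_policy = "devplanetv1"
+#  ec2_keyname  = "someone@uchicago.edu"
+#  peering_cidr = "10.128.0.0/20"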
diff --git a/tf_files/aws/eks/variables.tf b/tf_files/aws/eks/variables.tf index b4275dc6b..0dc78a8ab 100644 --- a/tf_files/aws/eks/variables.tf +++ b/tf_files/aws/eks/variables.tf @@ -1,4 +1,3 @@ - variable "vpc_name" {} variable "ec2_keyname" { @@ -31,7 +30,6 @@ variable "peering_vpc_id" { variable "users_policy" {} - variable "worker_drive_size" { default = 30 } @@ -149,17 +147,20 @@ variable "customer_id" { } # This controls whether or not we use FIPS enabled AMI's + variable "fips" { default = false } # the key that was used to encrypt the FIPS enabled AMI # This is needed to ASG can decrypt the ami + variable "fips_ami_kms" { default = "arn:aws:kms:us-east-1:707767160287:key/mrk-697897f040ef45b0aa3cebf38a916f99" } # This is the FIPS enabled AMI in cdistest account. + variable "fips_enabled_ami" { default = "ami-0de87e3680dcb13ec" } diff --git a/tf_files/aws/encrypted-rds/sample.tfvars b/tf_files/aws/encrypted-rds/sample.tfvars index 09468f5a7..f3e1574d3 100644 --- a/tf_files/aws/encrypted-rds/sample.tfvars +++ b/tf_files/aws/encrypted-rds/sample.tfvars @@ -1,2 +1,210 @@ -# Mandatory variables -vpc_name = devplanetv1 +#Automatically generated from a corresponding variables.tf on 2022-07-12 15:15:28.628361 + +#The name of the VPC this RDS instance will be attached to +vpc_name = "vpcName" + +#The CIDR block used in the VPC +vpc_cidr_block = "172.24.17.0/20" + +#The region to spin up all the resources in +aws_region = "us-east-1" + +# +#TODO Look this one up and get it right +csoc_account_id = "433568766270" + +#The CIDR for the peering VPC +peering_cidr = "10.128.0.0/20" + +#The size, in GB, of the Fence DB +fence_db_size = 10 + +#The size, in GB, of the Sheepdog DB +sheepdog_db_size = 10 + +#The size, in GB, of the Indexd DB +indexd_db_size = 10 + +#The password for the Fence DB +db_password_fence= "" + +#The password for the GDCAPI DB +db_password_gdcapi = "" + +#The password for the Peregrine DB +db_password_peregrine= "" + +#The password for the Sheepdog DB +db_password_sheepdog= "" + +#The password for the Indexd DB +db_password_indexd= "" + +#A snapshot of an RDS database, used to populate this DB with data +fence_snapshot = "" + +#A snapshot of an RDS database, used to populate this DB with data +gdcapi_snapshot = "" + +#A snapshot of an RDS database, used to populate this DB with data +peregrine_snapshot = "" + +#A snapshot of an RDS database, used to populate this DB with data +sheepdog_snapshot = "" + +#A snapshot of an RDS database, used to populate this DB with data +indexd_snapshot = "" + +#The instance type to run the Fence DB
on +#https://aws.amazon.com/rds/instance-types/ +fence_db_instance = "db.t3.small" + +#The instance type to run the Sheepdog DB on +#https://aws.amazon.com/rds/instance-types/ +sheepdog_db_instance = "db.t3.small" + +#The instance type to run the Indexd DB on +#https://aws.amazon.com/rds/instance-types/ +indexd_db_instance = "db.t3.small" + +#The ID of the peered VPC +peering_vpc_id = "vpc-e2b51d99" + +#A webhook used to send alerts in a Slack channel +#https://api.slack.com/messaging/webhooks +slack_webhook = "" + +#A webhook used to send alerts in a secondary Slack channel +#https://api.slack.com/messaging/webhooks +secondary_slack_webhook = "" + +#Threshold for database storage utilization. This is a number that represents a percentage of storage used. +#Once this alarm is triggered, the webhook is used to send a notification via Slack +alarm_threshold = "85" + +#Organization used for tagging & tracking purposes +organization_name = "Basic Service" + +#Boolean that represents if Fence should be deployed in a high-availability configuration +fence_ha = false + +#Boolean that represents if Sheepdog should be deployed in a high-availability configuration +sheepdog_ha = false + +#Boolean that represents if Indexd should be deployed in a high-availability configuration +indexd_ha = false + +#The maintenance window for Fence +#Format is "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00" +fence_maintenance_window = "SAT:09:00-SAT:09:59" + +#Boolean that represents if the RDS instance's storage should be encrypted +rds_instance_storage_encrypted = true + +#The maintenance window for Sheepdog +#Format is "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00" +sheepdog_maintenance_window = "SAT:10:00-SAT:10:59" + +#The maintenance window for Indexd +#Format is "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00" +indexd_maintenance_window = "SAT:11:00-SAT:11:59" + +#How many snapshots of the database should be kept at a time +fence_backup_retention_period = "4" + +#How many snapshots of the database should be kept at a time +sheepdog_backup_retention_period = "4" + +#How many snapshots of the database should be kept at a time +indexd_backup_retention_period = "4" + +#The time range when Fence can be backed up +#Format is "hh24:mi-hh24:mi" (UTC). Eg: "06:00-06:59" +fence_backup_window = "06:00-06:59" + +#The time range when Sheepdog can be backed up +#Format is "hh24:mi-hh24:mi" (UTC). Eg: "07:00-07:59" +sheepdog_backup_window = "07:00-07:59" + +#The time range when Indexd can be backed up +#Format is "hh24:mi-hh24:mi" (UTC). Eg: "08:00-08:59" +indexd_backup_window = "08:00-08:59" + +#The version of the database software used to run the database +fence_engine_version = "13.3" + +#The version of the database software used to run the database +sheepdog_engine_version = "13.3" + +#The version of the database software used to run the database +indexd_engine_version = "13.3" + +#Whether the database can automatically update minor versions +fence_auto_minor_version_upgrade = "true" + +#Whether the database can automatically update minor versions +indexd_auto_minor_version_upgrade = "true" + +#Whether the database can automatically update minor versions +sheepdog_auto_minor_version_upgrade = "true" + +#Name of the Fence database. Not the same as the instance identifier +fence_database_name = "fence" + +#Name of the Sheepdog database. Not the same as the instance identifier +sheepdog_database_name = "gdcapi" + +#Name of the Indexd database.
Not the same as the instance identifier +indexd_database_name = "indexd" + +#The username for the Fence database +fence_db_username = "fence_user" + +#The username for the Sheepdog database +sheepdog_db_username = "sheepdog" + +#The username for the Indexd database +indexd_db_username = "indexd_user" + +#Boolean that controls if the database is allowed to automatically upgrade major versions +fence_allow_major_version_upgrade = "true" + +#Boolean that controls if the database is allowed to automatically upgrade major versions +sheepdog_allow_major_version_upgrade = "true" + +#Boolean that controls if the database is allowed to automatically upgrade major versions +indexd_allow_major_version_upgrade = "true" + +#Whether or not to deploy the database instance +deploy_sheepdog_db = true + +#Whether or not to deploy the database instance +deploy_fence_db = true + +#Whether or not to deploy the database instance +deploy_indexd_db = true + +#Engine to deploy the db instance +sheepdog_engine = "postgres" + +#Engine to deploy the db instance +fence_engine = "postgres" + +#Engine to deploy the db instance +indexd_engine = "postgres" + +#The security group to add the DB instances to +security_group_local_id = "securityGroupId" + +#The subnet group for databases that this DB should be spun up in +aws_db_subnet_group_name = "subnetName" + +#Maximum allocated storage for autoscaling +fence_max_allocated_storage = 0 + +#Maximum allocated storage for autoscaling +sheepdog_max_allocated_storage = 0 + +#Maximum allocated storage for autoscaling +indexd_max_allocated_storage = 0 + diff --git a/tf_files/aws/kubecost/sample.tfvars b/tf_files/aws/kubecost/sample.tfvars index 040e428eb..540bd88a1 100644 --- a/tf_files/aws/kubecost/sample.tfvars +++ b/tf_files/aws/kubecost/sample.tfvars @@ -1,2 +1,14 @@ -# Mandatory variables -#vpc_name = devplanetv1 +#Automatically generated from a corresponding variables.tf on 2022-07-12 15:27:27.277857 + +#The name of the VPC to bring these resources up in +vpc_name = "" + +#This is used if the resource is set up as a secondary node +parent_account_id = "" + +#The S3 bucket in which to store the generated Cost and Usage report +cur_s3_bucket = "" + +#This is used if the resource is set up as a primary node. It specifies the account ID for the linked secondary node +slave_account_id = "" + diff --git a/tf_files/aws/publicvm/sample.tfvars b/tf_files/aws/publicvm/sample.tfvars index 60a7f61d2..9893e5b3f 100644 --- a/tf_files/aws/publicvm/sample.tfvars +++ b/tf_files/aws/publicvm/sample.tfvars @@ -1,17 +1,30 @@ -vpc_name = "THE_VPC_NAME - default is: vadcprod" +#Automatically generated from a corresponding variables.tf on 2022-07-12 16:07:24.564137 -instance_type = "default is: t3.small" +#The name of the VPC these resources will be spun up in +vpc_name = "vadcprod" -ssh_in_secgroup = "should already exist - default is: ssh_eks_vadcprod" +#The EC2 instance type to use for VM(s) spun up from this module. For more information on EC2 instance types, see: +#https://aws.amazon.com/ec2/instance-types/ +instance_type = "t3.small" -egress_secgroup = "should already exist - default is: out" +#Security group for SSH +ssh_in_secgroup = "ssh_eks_vadcprod" -subnet_name = "public subnet under vpc_name - default is: public" +#The name of the security group for egress. This should already exist +egress_secgroup = "out" -volume_size = "for the vm - default is 500" +#The public subnet located under vpc_name.
By default is set to public +subnet_name = "public" -policies = ["list of policies ARNs to attach to the role that will be attached to this VM"] +#Volume size of the VM in GB (technically GiB, but what's a few bits among friends?) +volume_size = 500 -ami = "ami to use, if empty (default) latest ubuntu available will be used" +#List of policy ARNs to attach to the role that will be attached to this VM +policies = [] + +#The AMI to use for the machine, if nothing is specified, the latest version of Ubuntu available will be used +ami = "" + +#The name for the VM, should be unique. +vm_name= "" -vm_name = "Name for the vm, should be unique, there is no default value for this one, so you must set something here" diff --git a/tf_files/aws/publicvm/variables.tf b/tf_files/aws/publicvm/variables.tf index 4ea97a19f..2698e1940 100644 --- a/tf_files/aws/publicvm/variables.tf +++ b/tf_files/aws/publicvm/variables.tf @@ -6,7 +6,6 @@ variable "instance_type" { default = "t3.small" } - variable "ssh_in_secgroup" { default = "ssh_eks_vadcprod" } diff --git a/tf_files/aws/rds/sample.tfvars b/tf_files/aws/rds/sample.tfvars index 88d0fc195..c58a8b209 100644 --- a/tf_files/aws/rds/sample.tfvars +++ b/tf_files/aws/rds/sample.tfvars @@ -1,58 +1,156 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 16:47:21.465202 -# Mandatory variables -rds_instance_allocated_storage = 20 -rds_instance_engine = "MySQL,postgres,oracle,aurora,SQL,MariaDB" -rds_instance_engine_version = "version for your engine, basically depends on the variable above" -rds_instance_username = "usern ame for access" -#rds_instance_password = "password for access" -rds_instance_port = "1433" -rds_instance_identifier = "planx-tests-db" -#rds_instance_db_subnet_group_name = "subnet group name" -#rds_instance_vpc_security_group_ids = ["sg-XXXXXXXXXX"] - - -# Optional variables, uncomment and change values accordingly - -#rds_instance_name = "what are you naming the db" -#rds_instance_allow_major_version_upgrade = true -#rds_instance_apply_immediately = false -#rds_instance_auto_minor_version_upgrade = true -#rds_instance_availability_zone = "" -#rds_instance_backup_retention_period = 0 -#rds_instance_backup_window = "03:46-04:16" -#rds_instance_character_set_name = "" -#rds_instance_copy_tags_to_snapshot = false -#rds_instance_create = true -#rds_instance_deletion_protection = false -#rds_instance_enabled_cloudwatch_logs_exports = [] -#rds_instance_iam_database_authentication_enabled = false -#rds_instance_instance_class = "db.t3.micro" -#rds_instance_iops = 0 -#rds_instance_kms_key_id = "" -#rds_instance_license_model = false -#rds_instance_maintenance_window = "Mon:00:00-Mon:03:00" -#rds_instance_max_allocated_storage = 0 -#rds_instance_monitoring_interval = 0 -#rds_instance_monitoring_role_arn = "" -#rds_instance_monitoring_role_name = "rds-monitoring-role" -#rds_instance_multi_az = false -#rds_instance_option_group_name = "" -#rds_instance_parameter_group_name = "" -#rds_instance_performance_insights_enabled = false -#rds_instance_performance_insights_retention_period = 7 -#rds_instance_publicly_accessible = false -#rds_instance_replicate_source_db = "" -#rds_instance_skip_final_snapshot = false -#rds_instance_snapshot_identifier = "" -#rds_instance_storage_encrypted = false -#rds_instance_storage_type = "gp2" -#rds_instance_tags = {"something"="stuff", "Something-else"="more-stuff"} -#rds_instance_timeouts = {create = "40m", update = "80m", delete = "40m"} -#rds_instance_timezone = "" 
-#rds_instance_final_snapshot_identifier = "" - -# backups -#rds_instance_backup_enabled = false -#rds_instance_backup_kms_key = "" -#rds_instance_backup_bucket_name = "" +#Whether to create this resource or not? +rds_instance_create = true + +#Allocated storage in gibibytes +rds_instance_allocated_storage = 20 + +#What type of storage to use for the database. +#More information can be found here: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html +rds_instance_storage_type = "gp2" + +#The database engine to use. Information on types and pricing can be found here: +#https://aws.amazon.com/rds/pricing/?pg=ln&sec=hs +rds_instance_engine = "" + +#The engine version to use. If auto_minor_version_upgrade is enabled, you can provide a prefix of the +#version such as 5.7 (for 5.7.10) and this attribute will ignore differences in the patch version automatically (e.g. 5.7.17) +rds_instance_engine_version = "" + +#The instance type of the RDS instance +#https://aws.amazon.com/rds/instance-types/ +rds_instance_instance_class = "db.t2.micro" + +#Name for the database to be created +rds_instance_name = "" + +#The name of the RDS instance, if omitted, Terraform will assign a random, unique identifier +rds_instance_identifier= "" + +#Username to use for the RDS instance +rds_instance_username = "" + +#Password to use for the RDS instance +rds_instance_password = "" + +#A DB parameter group is a reusable template of values for things like RAM allocation that can be associated with a DB instance. +#For more info, see: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html +rds_instance_parameter_group_name = "" + +#Indicates that major version upgrades are allowed +rds_instance_allow_major_version_upgrade = true + +#Specifies whether any database modifications are applied immediately, or during the next maintenance window +rds_instance_apply_immediately = false + +#Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window +rds_instance_auto_minor_version_upgrade = true + +#The number of days to retain backups for. Must be between 0 and 35 +rds_instance_backup_retention_period = 0 + +#The daily time range (in UTC) during which automated backups are created if they are enabled. Example: '09:46-10:16'. Must not overlap with maintenance_window +rds_instance_backup_window = "03:46-04:16" + +#Name of DB subnet group. DB instance will be created in the VPC associated with the DB subnet group +rds_instance_db_subnet_group_name = "" + +#The window to perform maintenance in +rds_instance_maintenance_window = "Mon:00:00-Mon:03:00" + +#Specifies if the RDS instance is multi-AZ +rds_instance_multi_az = false + +#Name of the DB option group to associate +rds_instance_option_group_name = "" + +#Bool to control if instance is publicly accessible +rds_instance_publicly_accessible = false + +#Determines if a final snapshot will be taken of the database before it is deleted. 
False means that a backup will be taken, +#and true means that none will be +rds_instance_skip_final_snapshot = false + +#Specifies whether the DB instance is encrypted +rds_instance_storage_encrypted = false + +#A list of VPC security groups to associate with the instance +#For more information, see: https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html +rds_instance_vpc_security_group_ids = [] + +#Tags for the instance, used for searching and filtering +rds_instance_tags = {} + +#The port on which the DB accepts connections +rds_instance_port = "" + +#License model information for this DB instance +rds_instance_license_model = "" + +#Specifies whether Performance Insights are enabled +rds_instance_performance_insights_enabled = false + +#The amount of time in days to retain Performance Insights data. Either 7 (7 days) or 731 (2 years). +rds_instance_performance_insights_retention_period = 7 + +#(Optional) Updated Terraform resource management timeouts. Applies to `aws_db_instance` in particular to permit resource management times +rds_instance_timeouts = { create = "40m" update = "80m" delete = "40m" } + +#Name of the IAM role which will be created when create_monitoring_role is enabled. +rds_instance_monitoring_role_name = "rds-monitoring-role" + +#Specifies the value for Storage Autoscaling +rds_instance_max_allocated_storage = 0 + +#The Availability Zone of the RDS instance +rds_instance_availability_zone = "" + +#The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. Must be specified if monitoring_interval is non-zero. +rds_instance_monitoring_role_arn = "" + +#On delete, copy all Instance tags to the final snapshot (if final_snapshot_identifier is specified) +rds_instance_copy_tags_to_snapshot = false + +#The ARN for the KMS encryption key. If creating an encrypted replica, set this to the destination KMS ARN. If storage_encrypted is set to true and kms_key_id is not specified the default KMS key created in your account will be used +rds_instance_kms_key_id = "" + +#List of log types to enable for exporting to CloudWatch logs. If omitted, no logs will be exported. Valid values (depending on engine): alert, audit, error, general, listener, slowquery, trace, postgresql (PostgreSQL), upgrade (PostgreSQL). +rds_instance_enabled_cloudwatch_logs_exports = [] + +#The amount of provisioned IOPS. Setting this implies a storage_type of 'io1' +rds_instance_iops = 0 + +#The database can't be deleted when this value is set to true. +rds_instance_deletion_protection = false + +#Specifies whether or mappings of AWS Identity and Access Management (IAM) accounts to database accounts is enabled +rds_instance_iam_database_authentication_enabled = false + +#(Optional) Time zone of the DB instance. timezone is currently only supported by Microsoft SQL Server. The timezone can only be set on creation. See MSSQL User Guide for more information. +rds_instance_timezone = "" + +#The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0. Valid Values: 0, 1, 5, 10, 15, 30, 60. +rds_instance_monitoring_interval = 0 + +#Specifies whether or not to create this database from a snapshot. This correlates to the snapshot ID you'd find in the RDS console, e.g: rds:production-2015-06-26-06-05. 
+rds_instance_snapshot_identifier = "" + +#Specifies that this resource is a Replicate database, and to use this value as the source database. This correlates to the identifier of another Amazon RDS Database to replicate. +rds_instance_replicate_source_db = "" + +#Create IAM role with a defined name that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. +rds_instance_create_monitoring_role = false + +#(Optional) The character set name to use for DB encoding in Oracle instances. This can't be changed. See Oracle Character Sets Supported in Amazon RDS for more information +rds_instance_character_set_name = "" + +#To enable backups onto S3 +rds_instance_backup_enabled = false + +#KMS to enable backups onto S3 +rds_instance_backup_kms_key = "" + +#The bucket to send bacups to +rds_instance_backup_bucket_name = "" diff --git a/tf_files/aws/rds/variables.tf b/tf_files/aws/rds/variables.tf index f97e082d7..c887c202f 100644 --- a/tf_files/aws/rds/variables.tf +++ b/tf_files/aws/rds/variables.tf @@ -1,4 +1,3 @@ - variable "rds_instance_create" { description = "Whether to create this resource or not?" # type = bool @@ -191,7 +190,6 @@ variable "rds_instance_availability_zone" { default = "" } - variable "rds_instance_final_snapshot_identifier" { description = "The name of your final DB snapshot when this DB instance is deleted." # type = "string" @@ -200,7 +198,7 @@ variable "rds_instance_final_snapshot_identifier" { variable "rds_instance_monitoring_role_arn" { description = "The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. Must be specified if monitoring_interval is non-zero." -# type = "string" +# type = "string"ß default = "" } diff --git a/tf_files/aws/rds_snapshot/sample.tfvars b/tf_files/aws/rds_snapshot/sample.tfvars new file mode 100644 index 000000000..a471c2fed --- /dev/null +++ b/tf_files/aws/rds_snapshot/sample.tfvars @@ -0,0 +1,17 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 16:51:07.398804 + +#The AWS region this snapshot will be taken from +aws_region = "us-east-1" + +#The VPC this snapshot will be taken from +vpc_name= "" + +#The RDS ID that corresponds to the indexd database +indexd_rds_id= "" + +#The RDS ID that corresponds to the Fence database +fence_rds_id= "" + +#The RDS ID that corresponds to the Sheepdog database +sheepdog_rds_id= "" + diff --git a/tf_files/aws/rds_snapshot/variables.tf b/tf_files/aws/rds_snapshot/variables.tf index 1065a13c7..8491e8a8e 100644 --- a/tf_files/aws/rds_snapshot/variables.tf +++ b/tf_files/aws/rds_snapshot/variables.tf @@ -5,10 +5,13 @@ variable "aws_region" { variable "vpc_name" {} # rds instance id + variable "indexd_rds_id" {} # rds instance id + variable "fence_rds_id" {} # rds instance id + variable "sheepdog_rds_id" {} diff --git a/tf_files/aws/role/sample.tfvars b/tf_files/aws/role/sample.tfvars index 49f6fceb0..0e2e3ff71 100644 --- a/tf_files/aws/role/sample.tfvars +++ b/tf_files/aws/role/sample.tfvars @@ -1,3 +1,24 @@ -rolename="rolename" +#The name of the role +rolename="" + +#A description of the role description="Role created with gen3 awsrole" + +#A path to attach to the role. 
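As context for the sample.tfvars files added throughout this change: they are starting points meant to be copied and filled in, not ready-to-apply configuration. A minimal sketch of exercising the RDS sample outside the gen3 helper scripts follows; the working-copy filename and the plain-terraform invocation are illustrative assumptions, not part of this patch.

    # assumes a checkout of tf_files/aws/rds with real values filled into the copy
    cp sample.tfvars my-db.tfvars           # hypothetical working copy
    terraform init
    terraform plan -var-file=my-db.tfvars   # review the planned changes first
    terraform apply -var-file=my-db.tfvars
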
For more information, see: +#https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names path="/gen3_service/" + +#Assume-role policy to attach to the role +ar_policy = < Date: Mon, 3 Oct 2022 16:38:54 -0500 Subject: [PATCH 02/29] feat(tf-cleanup-script): Added script to clean up tf plugin dirs (#1896) Co-authored-by: Edward Malinowski --- files/scripts/tf-cleanup.sh | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 files/scripts/tf-cleanup.sh diff --git a/files/scripts/tf-cleanup.sh b/files/scripts/tf-cleanup.sh new file mode 100644 index 000000000..182d35c14 --- /dev/null +++ b/files/scripts/tf-cleanup.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +for users in $(cut -d: -f1 /etc/passwd); do + for directory in $(find /home/$users/.local/share/gen3 -name .terraform); do + echo "Removing $directory/plugins" >> /terraformScriptLogs-$(date -u +%Y%m%d)) + rm -rf $directory/plugins + done +done From 11bf8ea5a3d80ba45aaf133d55dde66be2d10c04 Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Wed, 5 Oct 2022 09:02:10 -0500 Subject: [PATCH 03/29] chore: filter IP adderss from dig output (#2049) --- gen3/bin/netpolicy.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gen3/bin/netpolicy.sh b/gen3/bin/netpolicy.sh index a7245a434..eb01eb737 100644 --- a/gen3/bin/netpolicy.sh +++ b/gen3/bin/netpolicy.sh @@ -192,7 +192,7 @@ gen3_net_db_access() { local ip serviceName="$1" hostname="$(gen3 db creds "$serviceName" | jq -r .db_host)" - ip="$(dig +short "$hostname")" + ip="$(dig +short "$hostname" | grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}')" if ! gen3_net_isIp "$ip"; then gen3_log_err "gen3_net_db_access" "unable to determine address of $serviceName database" return 1 From 54510104637912bb2ccee62e43723b7ee631eae6 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Tue, 11 Oct 2022 18:09:47 -0500 Subject: [PATCH 04/29] fix: add extra labels to argo-wrapper (#2053) --- kube/services/argo-wrapper/argo-wrapper-deploy.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml index f00bd2cc2..cbd734739 100644 --- a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml +++ b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml @@ -21,7 +21,9 @@ spec: tags.datadoghq.com/service: "argo-wrapper" netnolimit: "yes" public: "yes" + GEN3_ENV_LABEL GEN3_ARGO-WRAPPER_VERSION + GEN3_DATE_LABEL spec: affinity: podAntiAffinity: From 1a9c8535a44c99c4e132441ed8654e77e5a5aa45 Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Tue, 18 Oct 2022 12:00:22 -0500 Subject: [PATCH 05/29] Update comment in Arborist nginx conf (#2057) --- kube/services/revproxy/gen3.nginx.conf/arborist-service.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/revproxy/gen3.nginx.conf/arborist-service.conf b/kube/services/revproxy/gen3.nginx.conf/arborist-service.conf index 942307017..0e492eb71 100644 --- a/kube/services/revproxy/gen3.nginx.conf/arborist-service.conf +++ b/kube/services/revproxy/gen3.nginx.conf/arborist-service.conf @@ -87,7 +87,7 @@ location = /authz/mapping { set $proxy_service "arborist"; set $upstream http://${arborist_release_name}-service.$namespace.svc.cluster.local; - # Do not pass the username arg here! Otherwise anyone can see anyone's access. + # Do not pass the username arg here! Otherwise anyone can see anyone's access for Arborist<4.0.0. 
# Arborist will fall back to parsing the jwt for username. proxy_pass $upstream/auth/mapping; } From 08960990f3a80e39f5ce50db3b72bf568324b162 Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Fri, 21 Oct 2022 15:00:17 -0500 Subject: [PATCH 06/29] update squid webwhitelist (#2060) --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 1bf67da16..9955eff9c 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -143,6 +143,7 @@ sa-update.space-pro.be security.debian.org services.mathworks.com streaming.stat.iastate.edu +us-east4-docker.pkg.dev us-central1-docker.pkg.dev www.google.com www.icpsr.umich.edu From 22a738fc848bcb73754307ca351dd6c513ddb3d1 Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Tue, 25 Oct 2022 13:03:53 -0500 Subject: [PATCH 07/29] adding .pedscommons.org to whitelist (#2062) --- files/squid_whitelist/web_wildcard_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist index 1b87923e4..1421f6d5d 100644 --- a/files/squid_whitelist/web_wildcard_whitelist +++ b/files/squid_whitelist/web_wildcard_whitelist @@ -75,6 +75,7 @@ .paloaltonetworks.com .pandemicresponsecommons.org .perl.org +.pedscommons.org .planx-ci.io .planx-pla.net .postgresql.org From 90f30caa534eb50b774c3204ae1934cb1955345a Mon Sep 17 00:00:00 2001 From: cmlsn <100160785+cmlsn@users.noreply.github.com> Date: Wed, 26 Oct 2022 14:50:44 -0700 Subject: [PATCH 08/29] chore - modifying nginx settings to allow larger uploads that are causing errors on manifest upload. (#2063) --- gen3/lib/manifestDefaults/modsec/modsecurity.conf | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/gen3/lib/manifestDefaults/modsec/modsecurity.conf b/gen3/lib/manifestDefaults/modsec/modsecurity.conf index 117d92e00..508834620 100644 --- a/gen3/lib/manifestDefaults/modsec/modsecurity.conf +++ b/gen3/lib/manifestDefaults/modsec/modsecurity.conf @@ -39,15 +39,15 @@ SecRule REQUEST_HEADERS:Content-Type "application/json" \ # to the size of data, with files excluded. You want to keep that value as # low as practical. # -SecRequestBodyLimit 13107200 -SecRequestBodyNoFilesLimit 131072 +SecRequestBodyLimit 524288000 +SecRequestBodyNoFilesLimit 1048576 # What do do if the request body size is above our configured limit. # Keep in mind that this setting will automatically be set to ProcessPartial # when SecRuleEngine is set to DetectionOnly mode in order to minimize # disruptions when initially deploying ModSecurity. # -SecRequestBodyLimitAction Reject +SecRequestBodyLimitAction ProcessPartial # Verify that we've correctly processed the request body. 
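For reference on the ModSecurity change above: the new limits are much larger than the old ones, and switching SecRequestBodyLimitAction to ProcessPartial means over-limit bodies are inspected only up to the limit and passed through rather than rejected. A quick sanity check of the byte math (a sketch; the echo commands are only to show the arithmetic, not part of the patch):

    # new SecRequestBodyLimit: 500 MiB (was 13107200 bytes = 12.5 MiB)
    echo $((500 * 1024 * 1024))   # 524288000
    # new SecRequestBodyNoFilesLimit: 1 MiB (was 131072 bytes = 128 KiB)
    echo $((1024 * 1024))         # 1048576
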
# As a rule of thumb, when failing to process a request body From 902f442b77a721977f88db2836a3c617ca00264b Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Thu, 27 Oct 2022 12:50:39 -0700 Subject: [PATCH 09/29] feat(datadog): enable for DICOM Viewer and DICOM Server (Orthanc) (#2065) --- .../dicom-server/dicom-server-deploy.yaml | 25 +++++++++++++++++++ .../dicom-viewer/dicom-viewer-deploy.yaml | 25 +++++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/kube/services/dicom-server/dicom-server-deploy.yaml b/kube/services/dicom-server/dicom-server-deploy.yaml index a2c1a2c03..b2ef0834e 100644 --- a/kube/services/dicom-server/dicom-server-deploy.yaml +++ b/kube/services/dicom-server/dicom-server-deploy.yaml @@ -24,6 +24,31 @@ spec: containers: - name: dicom-server GEN3_DICOM-SERVER_IMAGE + env: + - name: DD_ENABLED + valueFrom: + configMapKeyRef: + name: manifest-global + key: dd_enabled + optional: true + - name: DD_ENV + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/env'] + - name: DD_SERVICE + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/service'] + - name: DD_VERSION + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/version'] + - name: DD_LOGS_INJECTION + value: "true" + - name: DD_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP imagePullPolicy: Always readinessProbe: httpGet: diff --git a/kube/services/dicom-viewer/dicom-viewer-deploy.yaml b/kube/services/dicom-viewer/dicom-viewer-deploy.yaml index e7d05903b..d1fb8ce55 100644 --- a/kube/services/dicom-viewer/dicom-viewer-deploy.yaml +++ b/kube/services/dicom-viewer/dicom-viewer-deploy.yaml @@ -20,6 +20,31 @@ spec: containers: - name: dicom-viewer GEN3_DICOM-VIEWER_IMAGE + env: + - name: DD_ENABLED + valueFrom: + configMapKeyRef: + name: manifest-global + key: dd_enabled + optional: true + - name: DD_ENV + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/env'] + - name: DD_SERVICE + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/service'] + - name: DD_VERSION + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/version'] + - name: DD_LOGS_INJECTION + value: "true" + - name: DD_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP imagePullPolicy: Always readinessProbe: httpGet: From 2bb923a07ce3aa392154cf2940e21984f4679e3d Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Mon, 31 Oct 2022 13:11:36 -0700 Subject: [PATCH 10/29] update jenkins versions (#2066) --- Docker/Jenkins-CI-Worker/Dockerfile | 2 +- Docker/Jenkins-Worker/Dockerfile | 2 +- Docker/Jenkins/Dockerfile | 2 +- Docker/Jenkins2/Dockerfile | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Docker/Jenkins-CI-Worker/Dockerfile b/Docker/Jenkins-CI-Worker/Dockerfile index 3ed282c80..6da26cb87 100644 --- a/Docker/Jenkins-CI-Worker/Dockerfile +++ b/Docker/Jenkins-CI-Worker/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jnlp-slave:4.9-1 +FROM jenkins/jnlp-slave:4.13.3-1 USER root diff --git a/Docker/Jenkins-Worker/Dockerfile b/Docker/Jenkins-Worker/Dockerfile index 5fd7db839..2136d76e3 100644 --- a/Docker/Jenkins-Worker/Dockerfile +++ b/Docker/Jenkins-Worker/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jnlp-slave:4.3-1 +FROM jenkins/jnlp-slave:4.13.3-1 USER root diff --git a/Docker/Jenkins/Dockerfile b/Docker/Jenkins/Dockerfile index e06eb7b71..3db580b91 100644 --- a/Docker/Jenkins/Dockerfile +++ b/Docker/Jenkins/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jenkins:2.298 +FROM 
jenkins/jenkins:2.375 USER root diff --git a/Docker/Jenkins2/Dockerfile b/Docker/Jenkins2/Dockerfile index 26f81c143..9d07df981 100644 --- a/Docker/Jenkins2/Dockerfile +++ b/Docker/Jenkins2/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jenkins:2.298 +FROM jenkins/jenkins:2.375 USER root From da2478d9364ec9dc2f107eccc8920e491db163c8 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Tue, 1 Nov 2022 08:36:52 -0700 Subject: [PATCH 11/29] GH Actions updated fror jenkins images (#2067) * GH Actions updated fror jenkins images * fix names * full path * try relative path * revert * add buildcontext * remove python-pip * remove python-virutalenv * update docker file * update docker files --- .github/workflows/image_build_push.yaml | 36 +++++++++++++++++++++++++ .secrets.baseline | 8 +++--- Docker/Jenkins-CI-Worker/Dockerfile | 8 +++--- Docker/Jenkins-Worker/Dockerfile | 8 +++--- Docker/Jenkins/Dockerfile | 9 +++---- 5 files changed, 49 insertions(+), 20 deletions(-) diff --git a/.github/workflows/image_build_push.yaml b/.github/workflows/image_build_push.yaml index 51543f0fe..898a65670 100644 --- a/.github/workflows/image_build_push.yaml +++ b/.github/workflows/image_build_push.yaml @@ -40,3 +40,39 @@ jobs: ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} + jenkins: + name: Jenkins Build and Push + uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master + with: + DOCKERFILE_LOCATION: "./Docker/Jenkins/Dockerfile" + DOCKERFILE_BUILD_CONTEXT: "./Docker/Jenkins" + OVERRIDE_REPO_NAME: "jenkins" + secrets: + ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} + ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} + jenkins-ci-worker: + name: Jenkins-CI-Worker Build and Push + uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master + with: + DOCKERFILE_LOCATION: "./Docker/Jenkins-CI-Worker/Dockerfile" + DOCKERFILE_BUILD_CONTEXT: "./Docker/Jenkins-CI-Worker" + OVERRIDE_REPO_NAME: "jenkins-ci-worker" + secrets: + ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} + ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} + jenkins-qa-worker: + name: Jenkins-QA-Worker Build and Push + uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master + with: + DOCKERFILE_LOCATION: "./Docker/Jenkins-Worker/Dockerfile" + DOCKERFILE_BUILD_CONTEXT: "./Docker/Jenkins-Worker" + OVERRIDE_REPO_NAME: "jenkins-qa-worker" + secrets: + ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} + ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} diff --git a/.secrets.baseline b/.secrets.baseline index 7a459b129..ddc2050f3 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2022-07-29T15:31:31Z", + "generated_at": "2022-10-31T23:55:07Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -79,7 +79,7 @@ "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", "is_secret": false, "is_verified": false, - "line_number": 124, + "line_number": 122, "type": "Secret Keyword" } ], @@ -88,7 +88,7 @@ "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", 
"is_secret": false, "is_verified": false, - "line_number": 138, + "line_number": 136, "type": "Secret Keyword" } ], @@ -97,7 +97,7 @@ "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", "is_secret": false, "is_verified": false, - "line_number": 113, + "line_number": 110, "type": "Secret Keyword" } ], diff --git a/Docker/Jenkins-CI-Worker/Dockerfile b/Docker/Jenkins-CI-Worker/Dockerfile index 6da26cb87..a22d81248 100644 --- a/Docker/Jenkins-CI-Worker/Dockerfile +++ b/Docker/Jenkins-CI-Worker/Dockerfile @@ -5,7 +5,7 @@ USER root ENV DEBIAN_FRONTEND=noninteractive # install python -RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base +RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base wget RUN set -xe && apt-get update \ && apt-get install -y lsb-release \ @@ -72,7 +72,7 @@ RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ && chmod a+rx /usr/local/bin/docker-compose # install nodejs -RUN curl -sL https://deb.nodesource.com/setup_12.x | bash - +RUN curl -sL https://deb.nodesource.com/setup_18.x | bash - RUN apt-get update && apt-get install -y nodejs # add psql: https://www.postgresql.org/download/linux/debian/ @@ -98,9 +98,7 @@ RUN sed -i 's/python3/python3.8/' /usr/bin/lsb_release && \ sed -i 's/python3/python3.8/' /usr/bin/add-apt-repository # install aws cli, poetry, pytest, etc. -RUN set -xe && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade && python3.8 -m pip install datadog --upgrade - -RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python3.8 - +RUN set -xe && python3.8 -m pip install --upgrade pip && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade && python3.8 -m pip install datadog --upgrade # install terraform RUN curl -o /tmp/terraform.zip https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip \ diff --git a/Docker/Jenkins-Worker/Dockerfile b/Docker/Jenkins-Worker/Dockerfile index 2136d76e3..58d098d85 100644 --- a/Docker/Jenkins-Worker/Dockerfile +++ b/Docker/Jenkins-Worker/Dockerfile @@ -5,7 +5,7 @@ USER root ENV DEBIAN_FRONTEND=noninteractive # install python and pip and aws cli -RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip build-essential libgit2-dev zip unzip less vim gettext-base +RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip build-essential libgit2-dev zip unzip less vim gettext-base wget RUN set -xe && python -m pip install awscli --upgrade && python -m pip install pytest --upgrade && python -m pip install PyYAML --upgrade && python -m pip install lxml --upgrade RUN set -xe && python3 -m pip install pytest --upgrade && python3 -m pip install PyYAML --upgrade RUN set -xe && python -m pip 
install yq --upgrade && python3 -m pip install yq --upgrade @@ -84,7 +84,7 @@ RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ && chmod a+rx /usr/local/bin/docker-compose # install nodejs -RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - +RUN curl -sL https://deb.nodesource.com/setup_18.x | bash - RUN apt-get update && apt-get install -y nodejs # install chrome (supports headless mode) @@ -129,9 +129,7 @@ RUN sed -i 's/python3/python3.7/' /usr/bin/lsb_release && \ sed -i 's/python3/python3.7/' /usr/bin/add-apt-repository # install aws cli, poetry, pytest, etc. -RUN set -xe && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade - -RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python3.8 - +RUN set -xe && python3.8 -m pip install --upgrade pip && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade # update /etc/sudoers RUN sed 's/^%sudo/#%sudo/' /etc/sudoers > /etc/sudoers.bak \ diff --git a/Docker/Jenkins/Dockerfile b/Docker/Jenkins/Dockerfile index 3db580b91..b0d579ec3 100644 --- a/Docker/Jenkins/Dockerfile +++ b/Docker/Jenkins/Dockerfile @@ -5,7 +5,7 @@ USER root ENV DEBIAN_FRONTEND=noninteractive # install python -RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base +RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base wget RUN set -xe && apt-get update \ && apt-get install -y lsb-release \ @@ -25,7 +25,6 @@ RUN set -xe && apt-get update \ libbz2-dev \ libexpat1-dev \ liblzma-dev \ - python-virtualenv \ lua5.3 \ r-base \ software-properties-common \ @@ -60,7 +59,7 @@ RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ && chmod a+rx /usr/local/bin/docker-compose # install nodejs -RUN curl -sL https://deb.nodesource.com/setup_12.x | bash - +RUN curl -sL https://deb.nodesource.com/setup_18.x | bash - RUN apt-get update && apt-get install -y nodejs # add psql: https://www.postgresql.org/download/linux/debian/ @@ -86,9 +85,7 @@ RUN sed -i 's/python3/python3.5/' /usr/bin/lsb_release && \ sed -i 's/python3/python3.5/' /usr/bin/add-apt-repository # install aws cli, poetry, pytest, etc. 
-RUN set -xe && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade - -RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python3 - +RUN set -xe && python3 -m pip install --upgrade pip && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade # install chrome (supports headless mode) RUN set -xe \ From 5c3ba19151404cc6995f012dfd347f9fca69ca5e Mon Sep 17 00:00:00 2001 From: jawadqur <55899496+jawadqur@users.noreply.github.com> Date: Tue, 1 Nov 2022 15:40:23 -0500 Subject: [PATCH 12/29] BRH-301: Extend hatchery to launch prismacloud containers (#2061) --- gen3/bin/kube-setup-hatchery.sh | 9 +++++++++ kube/services/hatchery/hatchery-deploy.yaml | 12 ++++++++++++ 2 files changed, 21 insertions(+) diff --git a/gen3/bin/kube-setup-hatchery.sh b/gen3/bin/kube-setup-hatchery.sh index b3eb659b8..1192c293e 100644 --- a/gen3/bin/kube-setup-hatchery.sh +++ b/gen3/bin/kube-setup-hatchery.sh @@ -82,6 +82,15 @@ if ! g3kubectl get sa "$saName" -o json | jq -e '.metadata.annotations | ."eks.a gen3 awsrole attach-policy "arn:aws:iam::aws:policy/AWSResourceAccessManagerFullAccess" --role-name ${roleName} --force-aws-cli || exit 1 fi +if [[ -f "$(gen3_secrets_folder)/prisma/apikey.json" ]]; then + ACCESSKEYID=$(jq -r .AccessKeyID "$(gen3_secrets_folder)/prisma/apikey.json") + SECRETKEY=$(jq -r .SecretKey "$(gen3_secrets_folder)/prisma/apikey.json") + if [[ ! -z "$ACCESSKEYID" && ! -z "$SECRETKEY" ]]; then + gen3_log_info "Found prisma apikey, creating kubernetes secret so hatchery can do prismacloud stuff.." 
+ g3kubectl delete secret prisma-secret --ignore-not-found + g3kubectl create secret generic prisma-secret --from-literal=AccessKeyId=$ACCESSKEYID --from-literal=SecretKey=$SECRETKEY + fi +fi g3kubectl apply -f "${GEN3_HOME}/kube/services/hatchery/hatchery-service.yaml" gen3 roll hatchery diff --git a/kube/services/hatchery/hatchery-deploy.yaml b/kube/services/hatchery/hatchery-deploy.yaml index 5ac1bb805..f67100098 100644 --- a/kube/services/hatchery/hatchery-deploy.yaml +++ b/kube/services/hatchery/hatchery-deploy.yaml @@ -104,6 +104,18 @@ spec: valueFrom: fieldRef: fieldPath: status.hostIP + - name: PRISMA_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: prisma-secret + key: AccessKeyId + optional: true + - name: PRISMA_SECRET_KEY + valueFrom: + secretKeyRef: + name: prisma-secret + key: SecretKey + optional: true volumeMounts: - name: hatchery-config readOnly: true From 38476a3f879a4c24453b3937bfb6cd90268825ef Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Wed, 2 Nov 2022 07:43:32 -0700 Subject: [PATCH 13/29] use jdk11 images for jenkins workers (#2068) * use jdk11 images for jenkins workers * update jenkins main deployment --- Docker/Jenkins-CI-Worker/Dockerfile | 2 +- Docker/Jenkins-Worker/Dockerfile | 2 +- Docker/Jenkins/Dockerfile | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Docker/Jenkins-CI-Worker/Dockerfile b/Docker/Jenkins-CI-Worker/Dockerfile index a22d81248..08d047e52 100644 --- a/Docker/Jenkins-CI-Worker/Dockerfile +++ b/Docker/Jenkins-CI-Worker/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jnlp-slave:4.13.3-1 +FROM jenkins/jnlp-slave:4.13.3-1-jdk11 USER root diff --git a/Docker/Jenkins-Worker/Dockerfile b/Docker/Jenkins-Worker/Dockerfile index 58d098d85..0ad941def 100644 --- a/Docker/Jenkins-Worker/Dockerfile +++ b/Docker/Jenkins-Worker/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jnlp-slave:4.13.3-1 +FROM jenkins/jnlp-slave:4.13.3-1-jdk11 USER root diff --git a/Docker/Jenkins/Dockerfile b/Docker/Jenkins/Dockerfile index b0d579ec3..a872ee1dd 100644 --- a/Docker/Jenkins/Dockerfile +++ b/Docker/Jenkins/Dockerfile @@ -114,7 +114,7 @@ RUN sed 's/^%sudo/#%sudo/' /etc/sudoers > /etc/sudoers.bak \ # add our custom start script COPY jenkins.sh /opt/cdis/bin/jenkins.sh RUN chmod -R a+rx /opt/cdis -ENTRYPOINT ["/sbin/tini", "--", "/opt/cdis/bin/jenkins.sh"] +ENTRYPOINT ["/usr/bin/tini", "--", "/opt/cdis/bin/jenkins.sh"] USER jenkins From a395b08cc8e2eaa2c05b96aa736b302f932a06a6 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Mon, 7 Nov 2022 12:23:48 -0600 Subject: [PATCH 14/29] fix: dbohdsi is the new label to have Atlas DB access (#2071) * fix: dbohdsi is the new label to have Atlas DB access * fix: change label for OHDSI WebAPI --- kube/services/cohort-middleware/cohort-middleware-deploy.yaml | 2 +- kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml index a21d97900..e301856e5 100644 --- a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml +++ b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml @@ -19,7 +19,7 @@ spec: metadata: labels: app: cohort-middleware - dbatlas: "yes" + dbohdsi: "yes" dbomop-data: "yes" public: "yes" tags.datadoghq.com/service: "cohort-middleware" diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml index f720ec530..2f4e57d47 100644 --- 
a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml @@ -17,7 +17,7 @@ spec: metadata: labels: app: ohdsi-webapi - dbohdsi-webapi: "yes" + dbohdsi: "yes" dbomop-data: "yes" internet: "yes" public: "yes" From 015dcb1356f75ab25a3b20500fd1b82920f66632 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Tue, 8 Nov 2022 14:26:03 -0600 Subject: [PATCH 15/29] feat(ohdsi): updates for 2.12 (#2072) --- gen3/bin/kube-setup-ohdsi.sh | 4 ++-- .../ohdsi-atlas-config-local.yaml} | 0 .../ohdsi-webapi-config.yaml} | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) rename kube/services/{ohdsi/ohdsi-configmap.yaml => ohdsi-atlas/ohdsi-atlas-config-local.yaml} (100%) rename kube/services/{ohdsi/ohdsi-secrets.yaml => ohdsi-webapi/ohdsi-webapi-config.yaml} (97%) diff --git a/gen3/bin/kube-setup-ohdsi.sh b/gen3/bin/kube-setup-ohdsi.sh index 0a4d3b7a8..52ae20b33 100644 --- a/gen3/bin/kube-setup-ohdsi.sh +++ b/gen3/bin/kube-setup-ohdsi.sh @@ -90,7 +90,7 @@ setup_secrets() { export FENCE_METADATA_URL="https://${hostname}/.well-known/openid-configuration" export FENCE_CLIENT_ID=$(jq -r ".FENCE_CLIENT_ID" <<< "$appcreds") export FENCE_CLIENT_SECRET=$(jq -r ".FENCE_CLIENT_SECRET" <<< "$appcreds") - envsubst <"${GEN3_HOME}/kube/services/ohdsi/ohdsi-secrets.yaml" | g3kubectl apply -f - + envsubst <"${GEN3_HOME}/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml" | g3kubectl apply -f - envsubst '$hostname' <"${GEN3_HOME}/kube/services/ohdsi-webapi/ohdsi-webapi-reverse-proxy-config.yaml" | g3kubectl apply -f - ) @@ -123,7 +123,7 @@ setup_creds setup_secrets setup_ingress -envsubst <${GEN3_HOME}/kube/services/ohdsi/ohdsi-configmap.yaml | g3kubectl apply -f - +envsubst <${GEN3_HOME}/kube/services/ohdsi/ohdsi-atlas-config-local.yaml | g3kubectl apply -f - gen3 roll ohdsi-webapi g3kubectl apply -f "${GEN3_HOME}/kube/services/ohdsi-webapi/ohdsi-webapi-service.yaml" diff --git a/kube/services/ohdsi/ohdsi-configmap.yaml b/kube/services/ohdsi-atlas/ohdsi-atlas-config-local.yaml similarity index 100% rename from kube/services/ohdsi/ohdsi-configmap.yaml rename to kube/services/ohdsi-atlas/ohdsi-atlas-config-local.yaml diff --git a/kube/services/ohdsi/ohdsi-secrets.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml similarity index 97% rename from kube/services/ohdsi/ohdsi-secrets.yaml rename to kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml index 7b84c7964..5cd46edd9 100644 --- a/kube/services/ohdsi/ohdsi-secrets.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml @@ -19,13 +19,12 @@ stringData: flyway_datasource_username: $DB_USER flyway_datasource_password: $DB_PASS flyway_locations: classpath:db/migration/postgresql - # Zoe testing Atlas-Fence + security_cors_enabled: "true" security_origin: "*" security_token_expiration: "43200" security_ssl_enabled: "false" -# security_provider: DisabledSecurity security_provider: AtlasRegularSecurity security_auth_windows_enabled: "false" @@ -50,6 +49,7 @@ stringData: security_oid_url: https://$hostname/.well-known/openid-configuration security_oid_redirectUrl: https://atlas.$hostname/atlas/#/welcome security_oid_logoutUrl: https://atlas.$hostname/atlas/#/home + security_oid_extraScopes: user security_oauth_callback_ui: https://atlas.$hostname/atlas/#/welcome security_oauth_callback_api: https://atlas.$hostname/WebAPI/user/oauth/callback From 4b944f4f69acb98737b0463cb0b64639f335154e Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Tue, 8 Nov 2022 16:22:02 -0600 Subject: [PATCH 16/29] 
fix(ohdsi): fix path for Atlas config (#2073) --- gen3/bin/kube-setup-ohdsi.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-ohdsi.sh b/gen3/bin/kube-setup-ohdsi.sh index 52ae20b33..d586570db 100644 --- a/gen3/bin/kube-setup-ohdsi.sh +++ b/gen3/bin/kube-setup-ohdsi.sh @@ -123,7 +123,7 @@ setup_creds setup_secrets setup_ingress -envsubst <${GEN3_HOME}/kube/services/ohdsi/ohdsi-atlas-config-local.yaml | g3kubectl apply -f - +envsubst <${GEN3_HOME}/kube/services/ohdsi-atlas/ohdsi-atlas-config-local.yaml | g3kubectl apply -f - gen3 roll ohdsi-webapi g3kubectl apply -f "${GEN3_HOME}/kube/services/ohdsi-webapi/ohdsi-webapi-service.yaml" From 27d182bd6507689e725314825d63c9544d533f77 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Fri, 11 Nov 2022 13:21:52 -0800 Subject: [PATCH 17/29] Chore/GitHub actions for jenkins images (#2074) * build jenkins images when Dockerfile changes * add path filter * remove jenkins action from the generic workflow --- .github/workflows/image_build_push.yaml | 36 -------------- .../workflows/image_build_push_jenkins.yaml | 47 +++++++++++++++++++ .../Jenkins-CI-Worker/Dockerfile | 0 .../{ => jenkins}/Jenkins-CI-Worker/README.md | 0 .../Jenkins-CI-Worker/certfix.sh | 0 .../Jenkins-CI-Worker/install-python3.8.sh | 0 .../{ => jenkins}/Jenkins-Worker/Dockerfile | 0 Docker/{ => jenkins}/Jenkins-Worker/README.md | 0 .../Jenkins-Worker/install-python3.8.sh | 0 Docker/{ => jenkins}/Jenkins/Dockerfile | 0 Docker/{ => jenkins}/Jenkins/README.md | 0 .../Jenkins/install-python3.8.sh | 0 Docker/{ => jenkins}/Jenkins/jenkins.sh | 0 Docker/{ => jenkins}/Jenkins2/Dockerfile | 0 Docker/{ => jenkins}/Jenkins2/README.md | 0 .../Jenkins2/install-python3.8.sh | 0 .../Jenkins2/jenkins-master-deployment.yaml | 0 Docker/{ => jenkins}/Jenkins2/jenkins.values | 0 Docker/{ => jenkins}/Jenkins2/jenkins2.sh | 0 .../k8sjenkins-agent-master-policy.yaml | 0 .../Jenkins2/k8sjenkins-agent-policy.yaml | 0 21 files changed, 47 insertions(+), 36 deletions(-) create mode 100644 .github/workflows/image_build_push_jenkins.yaml rename Docker/{ => jenkins}/Jenkins-CI-Worker/Dockerfile (100%) rename Docker/{ => jenkins}/Jenkins-CI-Worker/README.md (100%) rename Docker/{ => jenkins}/Jenkins-CI-Worker/certfix.sh (100%) rename Docker/{ => jenkins}/Jenkins-CI-Worker/install-python3.8.sh (100%) rename Docker/{ => jenkins}/Jenkins-Worker/Dockerfile (100%) rename Docker/{ => jenkins}/Jenkins-Worker/README.md (100%) rename Docker/{ => jenkins}/Jenkins-Worker/install-python3.8.sh (100%) rename Docker/{ => jenkins}/Jenkins/Dockerfile (100%) rename Docker/{ => jenkins}/Jenkins/README.md (100%) rename Docker/{ => jenkins}/Jenkins/install-python3.8.sh (100%) rename Docker/{ => jenkins}/Jenkins/jenkins.sh (100%) rename Docker/{ => jenkins}/Jenkins2/Dockerfile (100%) rename Docker/{ => jenkins}/Jenkins2/README.md (100%) rename Docker/{ => jenkins}/Jenkins2/install-python3.8.sh (100%) rename Docker/{ => jenkins}/Jenkins2/jenkins-master-deployment.yaml (100%) rename Docker/{ => jenkins}/Jenkins2/jenkins.values (100%) rename Docker/{ => jenkins}/Jenkins2/jenkins2.sh (100%) rename Docker/{ => jenkins}/Jenkins2/k8sjenkins-agent-master-policy.yaml (100%) rename Docker/{ => jenkins}/Jenkins2/k8sjenkins-agent-policy.yaml (100%) diff --git a/.github/workflows/image_build_push.yaml b/.github/workflows/image_build_push.yaml index 898a65670..51543f0fe 100644 --- a/.github/workflows/image_build_push.yaml +++ b/.github/workflows/image_build_push.yaml @@ -40,39 +40,3 @@ jobs: 
ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} - jenkins: - name: Jenkins Build and Push - uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master - with: - DOCKERFILE_LOCATION: "./Docker/Jenkins/Dockerfile" - DOCKERFILE_BUILD_CONTEXT: "./Docker/Jenkins" - OVERRIDE_REPO_NAME: "jenkins" - secrets: - ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} - ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} - QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} - QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} - jenkins-ci-worker: - name: Jenkins-CI-Worker Build and Push - uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master - with: - DOCKERFILE_LOCATION: "./Docker/Jenkins-CI-Worker/Dockerfile" - DOCKERFILE_BUILD_CONTEXT: "./Docker/Jenkins-CI-Worker" - OVERRIDE_REPO_NAME: "jenkins-ci-worker" - secrets: - ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} - ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} - QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} - QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} - jenkins-qa-worker: - name: Jenkins-QA-Worker Build and Push - uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master - with: - DOCKERFILE_LOCATION: "./Docker/Jenkins-Worker/Dockerfile" - DOCKERFILE_BUILD_CONTEXT: "./Docker/Jenkins-Worker" - OVERRIDE_REPO_NAME: "jenkins-qa-worker" - secrets: - ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} - ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} - QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} - QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} diff --git a/.github/workflows/image_build_push_jenkins.yaml b/.github/workflows/image_build_push_jenkins.yaml new file mode 100644 index 000000000..466fc1f68 --- /dev/null +++ b/.github/workflows/image_build_push_jenkins.yaml @@ -0,0 +1,47 @@ +name: Build Python Base Images and Push to Quay and ECR + +on: + push: + paths: + - Docker/jenkins/** + +jobs: + jenkins: + name: Jenkins Build and Push + uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master + with: + DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins/Dockerfile" + DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins" + OVERRIDE_REPO_NAME: "jenkins" + USE_QUAY_ONLY: true + secrets: + ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} + ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} + jenkins-ci-worker: + name: Jenkins-CI-Worker Build and Push + uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master + with: + DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins-CI-Worker/Dockerfile" + DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins-CI-Worker" + OVERRIDE_REPO_NAME: "jenkins-ci-worker" + USE_QUAY_ONLY: true + secrets: + ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} + ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} + jenkins-qa-worker: + name: Jenkins-QA-Worker Build and Push + uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master + with: + DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins-Worker/Dockerfile" + DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins-Worker" + OVERRIDE_REPO_NAME: "jenkins-qa-worker" + USE_QUAY_ONLY: true + secrets: 
+ ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} + ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} diff --git a/Docker/Jenkins-CI-Worker/Dockerfile b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile similarity index 100% rename from Docker/Jenkins-CI-Worker/Dockerfile rename to Docker/jenkins/Jenkins-CI-Worker/Dockerfile diff --git a/Docker/Jenkins-CI-Worker/README.md b/Docker/jenkins/Jenkins-CI-Worker/README.md similarity index 100% rename from Docker/Jenkins-CI-Worker/README.md rename to Docker/jenkins/Jenkins-CI-Worker/README.md diff --git a/Docker/Jenkins-CI-Worker/certfix.sh b/Docker/jenkins/Jenkins-CI-Worker/certfix.sh similarity index 100% rename from Docker/Jenkins-CI-Worker/certfix.sh rename to Docker/jenkins/Jenkins-CI-Worker/certfix.sh diff --git a/Docker/Jenkins-CI-Worker/install-python3.8.sh b/Docker/jenkins/Jenkins-CI-Worker/install-python3.8.sh similarity index 100% rename from Docker/Jenkins-CI-Worker/install-python3.8.sh rename to Docker/jenkins/Jenkins-CI-Worker/install-python3.8.sh diff --git a/Docker/Jenkins-Worker/Dockerfile b/Docker/jenkins/Jenkins-Worker/Dockerfile similarity index 100% rename from Docker/Jenkins-Worker/Dockerfile rename to Docker/jenkins/Jenkins-Worker/Dockerfile diff --git a/Docker/Jenkins-Worker/README.md b/Docker/jenkins/Jenkins-Worker/README.md similarity index 100% rename from Docker/Jenkins-Worker/README.md rename to Docker/jenkins/Jenkins-Worker/README.md diff --git a/Docker/Jenkins-Worker/install-python3.8.sh b/Docker/jenkins/Jenkins-Worker/install-python3.8.sh similarity index 100% rename from Docker/Jenkins-Worker/install-python3.8.sh rename to Docker/jenkins/Jenkins-Worker/install-python3.8.sh diff --git a/Docker/Jenkins/Dockerfile b/Docker/jenkins/Jenkins/Dockerfile similarity index 100% rename from Docker/Jenkins/Dockerfile rename to Docker/jenkins/Jenkins/Dockerfile diff --git a/Docker/Jenkins/README.md b/Docker/jenkins/Jenkins/README.md similarity index 100% rename from Docker/Jenkins/README.md rename to Docker/jenkins/Jenkins/README.md diff --git a/Docker/Jenkins/install-python3.8.sh b/Docker/jenkins/Jenkins/install-python3.8.sh similarity index 100% rename from Docker/Jenkins/install-python3.8.sh rename to Docker/jenkins/Jenkins/install-python3.8.sh diff --git a/Docker/Jenkins/jenkins.sh b/Docker/jenkins/Jenkins/jenkins.sh similarity index 100% rename from Docker/Jenkins/jenkins.sh rename to Docker/jenkins/Jenkins/jenkins.sh diff --git a/Docker/Jenkins2/Dockerfile b/Docker/jenkins/Jenkins2/Dockerfile similarity index 100% rename from Docker/Jenkins2/Dockerfile rename to Docker/jenkins/Jenkins2/Dockerfile diff --git a/Docker/Jenkins2/README.md b/Docker/jenkins/Jenkins2/README.md similarity index 100% rename from Docker/Jenkins2/README.md rename to Docker/jenkins/Jenkins2/README.md diff --git a/Docker/Jenkins2/install-python3.8.sh b/Docker/jenkins/Jenkins2/install-python3.8.sh similarity index 100% rename from Docker/Jenkins2/install-python3.8.sh rename to Docker/jenkins/Jenkins2/install-python3.8.sh diff --git a/Docker/Jenkins2/jenkins-master-deployment.yaml b/Docker/jenkins/Jenkins2/jenkins-master-deployment.yaml similarity index 100% rename from Docker/Jenkins2/jenkins-master-deployment.yaml rename to Docker/jenkins/Jenkins2/jenkins-master-deployment.yaml diff --git a/Docker/Jenkins2/jenkins.values b/Docker/jenkins/Jenkins2/jenkins.values similarity index 100% rename from Docker/Jenkins2/jenkins.values 
rename to Docker/jenkins/Jenkins2/jenkins.values diff --git a/Docker/Jenkins2/jenkins2.sh b/Docker/jenkins/Jenkins2/jenkins2.sh similarity index 100% rename from Docker/Jenkins2/jenkins2.sh rename to Docker/jenkins/Jenkins2/jenkins2.sh diff --git a/Docker/Jenkins2/k8sjenkins-agent-master-policy.yaml b/Docker/jenkins/Jenkins2/k8sjenkins-agent-master-policy.yaml similarity index 100% rename from Docker/Jenkins2/k8sjenkins-agent-master-policy.yaml rename to Docker/jenkins/Jenkins2/k8sjenkins-agent-master-policy.yaml diff --git a/Docker/Jenkins2/k8sjenkins-agent-policy.yaml b/Docker/jenkins/Jenkins2/k8sjenkins-agent-policy.yaml similarity index 100% rename from Docker/Jenkins2/k8sjenkins-agent-policy.yaml rename to Docker/jenkins/Jenkins2/k8sjenkins-agent-policy.yaml From 644044fc7847334797ce4f8a081377e626d6a02f Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Fri, 18 Nov 2022 11:02:45 -0700 Subject: [PATCH 18/29] enabling USM for datadog (#2080) --- .secrets.baseline | 4 ++-- kube/services/datadog/values.yaml | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index ddc2050f3..e087e9243 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2022-10-31T23:55:07Z", + "generated_at": "2022-11-17T21:04:51Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -740,7 +740,7 @@ "hashed_secret": "52330dffa4d0795b4199a66428e54eca228e1661", "is_secret": false, "is_verified": false, - "line_number": 15, + "line_number": 20, "type": "Secret Keyword" } ], diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index 47896e4f0..95ec57239 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -10,6 +10,11 @@ datadog: useHostPort: true nonLocalTraffic: true + #Enables Optional Universal Service Monitoring + ## ref: https://docs.datadoghq.com/tracing/universal_service_monitoring/?tab=helm + serviceMonitoring: + enabled: true + # datadog.apiKeyExistingSecret -- Use existing Secret which stores API key instead of creating a new one. The value should be set with the `api-key` key inside the secret. ## If set, this parameter takes precedence over "apiKey". apiKeyExistingSecret: "datadog-agent" From 6e4fe582a5c6c53a63bf6d699c26294a76ff12ad Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Fri, 18 Nov 2022 12:03:34 -0600 Subject: [PATCH 19/29] (feat): add opencost report job for argo -> s3 (#2077) * (feat): add opencost report job for argo -> s3 --- .../jobs/opencost-report-argo-job.yaml | 55 +++++++++++++++++++ kube/services/jobs/opencost-report-job.yaml | 36 ------------ 2 files changed, 55 insertions(+), 36 deletions(-) create mode 100644 kube/services/jobs/opencost-report-argo-job.yaml delete mode 100644 kube/services/jobs/opencost-report-job.yaml diff --git a/kube/services/jobs/opencost-report-argo-job.yaml b/kube/services/jobs/opencost-report-argo-job.yaml new file mode 100644 index 000000000..9b8809cd7 --- /dev/null +++ b/kube/services/jobs/opencost-report-argo-job.yaml @@ -0,0 +1,55 @@ +# +# run with: +# gen3 job run opencost-report-argo \ +# BUCKET_NAME $GEN3_BUCKET_NAME \ +# OPENCOST_URL $OPENCOST_URL \ +# +# BUCKET_NAME(required) +# Name of the bucket to upload the generated reports to. +# Make sure that there is a service account called "reports-service-account" with access to this bucket. 
+# +# OPENCOST_URL(optional) +# URL to query OpenCost API's. Default is https://kubecost-cost-analyzer.kubecost +# +# +# Example +# gen3 job run opencost-report-argo BUCKET_NAME opencost-report-bucket +# +# Cronjob Example +# gen3 job cron opencost-report-argo @daily BUCKET_NAME opencost-report-bucket +apiVersion: batch/v1 +kind: Job +metadata: + name: opencost-report-argo +spec: + template: + metadata: + labels: + app: gen3job + spec: + serviceAccountName: reports-service-account + containers: + - name: send-report + GEN3_OPENCOST-REPORTER_IMAGE|-image: quay.io/cdis/proto-opencost-reporter:master-| + imagePullPolicy: Always + env: + - name: OPENCOST_URL + GEN3_OPENCOST_URL|-value: https://kubecost-cost-analyzer.kubecost-| + - name: ENV + valueFrom: + configMapKeyRef: + name: global + key: environment + - name: BUCKET_NAME + GEN3_BUCKET_NAME|-value: ""-| + command: [ "/bin/bash" ] + args: + - "-c" + - | + proto-opencost-reporter GetAllocationReport \ + --from_days_before 9 \ + --to_days_before 1 \ + --aggregate_by label:gen3username \ + --filter_namespaces argo \ + --share_idle_by_node + restartPolicy: Never \ No newline at end of file diff --git a/kube/services/jobs/opencost-report-job.yaml b/kube/services/jobs/opencost-report-job.yaml deleted file mode 100644 index e74aa1084..000000000 --- a/kube/services/jobs/opencost-report-job.yaml +++ /dev/null @@ -1,36 +0,0 @@ ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: opencost-report - namespace: kubecost -spec: - template: - metadata: - labels: - app: gen3job - spec: - serviceAccountName: report-service-account - containers: - - name: send-report - image: quay.io/cdis/awshelper:master - imagePullPolicy: Always - env: - - name: gen3Env - valueFrom: - configMapKeyRef: - name: global - key: environment - - name: JENKINS_HOME - value: "devterm" - - name: GEN3_HOME - value: /home/ubuntu/cloud-automation - - name: bucketName - GEN3_BUCKET_NAME|-value: ""-| - command: [ "/bin/bash" ] - args: - - "-c" - - | - curl -k "https://kubecost-cost-analyzer.kubecost/model/allocation/summary?aggregate=label%3Agen3username&window=7d&accumulate=true&shareIdle=false&idleByNode=false&shareTenancyCosts=true&shareNamespaces=&shareLabels=&shareCost=NaN&shareSplit=weighted" | jq -r . 
> "report-$(date +"%m-%d-%y").json" - aws s3 cp ./report*.json s3://$bucketName - restartPolicy: Never From 2974cecd8b63e3398b6b32beb23290b061425ddd Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Fri, 18 Nov 2022 12:04:20 -0600 Subject: [PATCH 20/29] updated aws account list for ecr access (#2059) --- gen3/bin/ecr.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/gen3/bin/ecr.sh b/gen3/bin/ecr.sh index 5b41f8d2c..23254c5de 100644 --- a/gen3/bin/ecr.sh +++ b/gen3/bin/ecr.sh @@ -31,6 +31,7 @@ accountList=( 980870151884 205252583234 885078588865 +922467707295 ) principalStr="" From 07989eff6fcd3e25895cc79fe5f4121789d2760f Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Mon, 21 Nov 2022 07:42:15 -0800 Subject: [PATCH 21/29] clean up jenkins2 config and upgrade version (#2076) * clean up jenkins2 config and upgrade version * Update jenkins2.sh update jenkins url * fix quay repo names Co-authored-by: Ajo Augustine --- .../workflows/image_build_push_jenkins.yaml | 17 +- Docker/jenkins/Jenkins2/Dockerfile | 11 +- .../Jenkins2/jenkins-master-deployment.yaml | 355 ------------------ Docker/jenkins/Jenkins2/jenkins.values | 39 -- Docker/jenkins/Jenkins2/jenkins2.sh | 14 +- .../k8sjenkins-agent-master-policy.yaml | 18 - .../Jenkins2/k8sjenkins-agent-policy.yaml | 19 - 7 files changed, 32 insertions(+), 441 deletions(-) delete mode 100755 Docker/jenkins/Jenkins2/jenkins-master-deployment.yaml delete mode 100644 Docker/jenkins/Jenkins2/jenkins.values delete mode 100644 Docker/jenkins/Jenkins2/k8sjenkins-agent-master-policy.yaml delete mode 100644 Docker/jenkins/Jenkins2/k8sjenkins-agent-policy.yaml diff --git a/.github/workflows/image_build_push_jenkins.yaml b/.github/workflows/image_build_push_jenkins.yaml index 466fc1f68..d08ac737d 100644 --- a/.github/workflows/image_build_push_jenkins.yaml +++ b/.github/workflows/image_build_push_jenkins.yaml @@ -19,13 +19,26 @@ jobs: ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} + jenkins2: + name: Jenkins2 Build and Push + uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master + with: + DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins2/Dockerfile" + DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins2" + OVERRIDE_REPO_NAME: "jenkins2" + USE_QUAY_ONLY: true + secrets: + ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} + ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} jenkins-ci-worker: name: Jenkins-CI-Worker Build and Push uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master with: DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins-CI-Worker/Dockerfile" DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins-CI-Worker" - OVERRIDE_REPO_NAME: "jenkins-ci-worker" + OVERRIDE_REPO_NAME: "gen3-ci-worker" USE_QUAY_ONLY: true secrets: ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} @@ -38,7 +51,7 @@ jobs: with: DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins-Worker/Dockerfile" DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins-Worker" - OVERRIDE_REPO_NAME: "jenkins-qa-worker" + OVERRIDE_REPO_NAME: "gen3-qa-worker" USE_QUAY_ONLY: true secrets: ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} diff --git a/Docker/jenkins/Jenkins2/Dockerfile b/Docker/jenkins/Jenkins2/Dockerfile index 9d07df981..59cb5672e 100644 --- a/Docker/jenkins/Jenkins2/Dockerfile +++ 
b/Docker/jenkins/Jenkins2/Dockerfile @@ -5,7 +5,7 @@ USER root ENV DEBIAN_FRONTEND=noninteractive # install python -RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base +RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base wget RUN set -xe && apt-get update \ && apt-get install -y lsb-release \ @@ -25,7 +25,6 @@ RUN set -xe && apt-get update \ libbz2-dev \ libexpat1-dev \ liblzma-dev \ - python-virtualenv \ lua5.3 \ r-base \ software-properties-common \ @@ -60,7 +59,7 @@ RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ && chmod a+rx /usr/local/bin/docker-compose # install nodejs -RUN curl -sL https://deb.nodesource.com/setup_12.x | bash - +RUN curl -sL https://deb.nodesource.com/setup_18.x | bash - RUN apt-get update && apt-get install -y nodejs # add psql: https://www.postgresql.org/download/linux/debian/ @@ -86,9 +85,7 @@ RUN sed -i 's/python3/python3.5/' /usr/bin/lsb_release && \ sed -i 's/python3/python3.5/' /usr/bin/add-apt-repository # install aws cli, poetry, pytest, etc. -RUN set -xe && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade - -RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python3 - +RUN set -xe && python3 -m pip install --upgrade pip && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade # install chrome (supports headless mode) RUN set -xe \ @@ -117,7 +114,7 @@ RUN sed 's/^%sudo/#%sudo/' /etc/sudoers > /etc/sudoers.bak \ # add our custom start script COPY jenkins2.sh /opt/cdis/bin/jenkins2.sh RUN chmod -R a+rx /opt/cdis -ENTRYPOINT ["/sbin/tini", "--", "/opt/cdis/bin/jenkins2.sh"] +ENTRYPOINT ["/usr/bin/tini", "--", "/opt/cdis/bin/jenkins2.sh"] USER jenkins diff --git a/Docker/jenkins/Jenkins2/jenkins-master-deployment.yaml b/Docker/jenkins/Jenkins2/jenkins-master-deployment.yaml deleted file mode 100755 index 922711ad1..000000000 --- a/Docker/jenkins/Jenkins2/jenkins-master-deployment.yaml +++ /dev/null @@ -1,355 +0,0 @@ -{{- if .Capabilities.APIVersions.Has "apps/v1" }} -apiVersion: apps/v1 -{{- else }} -apiVersion: apps/v1 -{{- end }} -kind: Deployment -metadata: - name: {{ template "jenkins.fullname" . }} - namespace: {{ template "jenkins.namespace" . 
}} - labels: - "app.kubernetes.io/name": '{{ template "jenkins.name" .}}' - "helm.sh/chart": "{{ .Chart.Name }}-{{ .Chart.Version }}" - "app.kubernetes.io/managed-by": "{{ .Release.Service }}" - "app.kubernetes.io/instance": "{{ .Release.Name }}" - "app.kubernetes.io/component": "{{ .Values.master.componentName }}" - {{- range $key, $val := .Values.master.deploymentLabels }} - {{ $key }}: {{ $val | quote }} - {{- end}} -spec: - replicas: 1 - strategy: - type: {{ if .Values.persistence.enabled }}Recreate{{ else }}RollingUpdate - rollingUpdate: -{{ toYaml .Values.master.rollingUpdate | indent 6 }} - {{- end }} - selector: - matchLabels: - "app.kubernetes.io/component": "{{ .Values.master.componentName }}" - "app.kubernetes.io/instance": "{{ .Release.Name }}" - template: - metadata: - labels: - "app.kubernetes.io/name": '{{ template "jenkins.name" .}}' - "helm.sh/chart": "{{ .Chart.Name }}-{{ .Chart.Version }}" - "app.kubernetes.io/managed-by": "{{ .Release.Service }}" - "app.kubernetes.io/instance": "{{ .Release.Name }}" - "app.kubernetes.io/component": "{{ .Values.master.componentName }}" - {{- range $key, $val := .Values.master.podLabels }} - {{ $key }}: {{ $val | quote }} - {{- end}} - annotations: - {{- if .Values.master.podAnnotations }} -{{ toYaml .Values.master.podAnnotations | indent 8 }} - {{- end }} - spec: - {{- if .Values.master.nodeSelector }} - nodeSelector: -{{ toYaml .Values.master.nodeSelector | indent 8 }} - {{- end }} - {{- if .Values.master.tolerations }} - tolerations: -{{ toYaml .Values.master.tolerations | indent 8 }} - {{- end }} - {{- if .Values.master.affinity }} - affinity: -{{ toYaml .Values.master.affinity | indent 8 }} - {{- end }} - {{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.master.priorityClassName) }} - priorityClassName: {{ .Values.master.priorityClassName }} - {{- end }} -{{- if .Values.master.usePodSecurityContext }} - securityContext: - runAsUser: {{ default 0 .Values.master.runAsUser }} -{{- if and (.Values.master.runAsUser) (.Values.master.fsGroup) }} -{{- if not (eq .Values.master.runAsUser 0.0) }} - fsGroup: {{ .Values.master.fsGroup }} -{{- end }} -{{- end }} -{{- end }} - serviceAccountName: "{{ template "jenkins.serviceAccountName" . }}" -{{- if .Values.master.hostNetworking }} - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet -{{- end }} - {{- if .Values.master.hostAliases }} - hostAliases: - {{- toYaml .Values.master.hostAliases | nindent 8 }} - {{- end }} - initContainers: -{{- if .Values.master.customInitContainers }} -{{ tpl (toYaml .Values.master.customInitContainers) . | indent 8 }} -{{- end }} - - name: "copy-default-config" -{{- if .Values.master.imageTag }} - image: "{{ .Values.master.image }}:{{ .Values.master.imageTag }}" -{{- else }} - image: "{{ .Values.master.image }}:{{ .Values.master.tag }}" -{{- end }} - imagePullPolicy: "{{ .Values.master.imagePullPolicy }}" - command: ["sh", "/var/jenkins_config/apply_config.sh"] - env: - {{- if .Values.master.useSecurity }} - - name: ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "jenkins.fullname" . }} - key: jenkins-admin-password - - name: ADMIN_USER - valueFrom: - secretKeyRef: - name: {{ template "jenkins.fullname" . 
}} - key: jenkins-admin-user - {{- end }} - {{- if .Values.master.initContainerEnv }} -{{ toYaml .Values.master.initContainerEnv | indent 12 }} - {{- end }} - resources: -{{ toYaml .Values.master.resources | indent 12 }} - volumeMounts: - - mountPath: /tmp - name: tmp - - mountPath: /var/jenkins_home - name: jenkins-home - {{- if .Values.persistence.subPath }} - subPath: {{ .Values.persistence.subPath }} - {{- end }} - - mountPath: /var/jenkins_config - name: jenkins-config - {{- if .Values.master.enableXmlConfig }} - {{- if .Values.master.credentialsXmlSecret }} - - mountPath: /var/jenkins_credentials - name: jenkins-credentials - readOnly: true - {{- end }} - {{- if .Values.master.jobs }} - - mountPath: /var/jenkins_jobs - name: jenkins-jobs - readOnly: true - {{- end }} - - mountPath: /usr/share/jenkins/ref/secrets/ - name: secrets-dir - {{- end }} - {{- if .Values.master.secretsFilesSecret }} - - mountPath: /var/jenkins_secrets - name: jenkins-secrets - readOnly: true - {{- end }} - - mountPath: /usr/share/jenkins/ref/plugins - name: plugins - - mountPath: /var/jenkins_plugins - name: plugin-dir - containers: - - name: jenkins -{{- if .Values.master.imageTag }} - image: "{{ .Values.master.image }}:{{ .Values.master.imageTag }}" -{{- else }} - image: "{{ .Values.master.image }}:{{ .Values.master.tag }}" -{{- end }} - imagePullPolicy: "{{ .Values.master.imagePullPolicy }}" - {{- if .Values.master.useSecurity }} - command: -{{ toYaml .Values.master.command | indent 10 }} - args: -{{ toYaml .Values.master.args | indent 10 }} - {{- end }} - {{- if .Values.master.lifecycle }} - lifecycle: -{{ toYaml .Values.master.lifecycle | indent 12 }} - {{- end }} - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name -{{ toYaml .Values.master.additionalEnv | indent 12 }} - - name: JAVA_OPTS - value: > - {{ default "" .Values.master.javaOpts }} - {{- if .Values.master.sidecars.configAutoReload.enabled }} -Dcasc.reload.token=$(POD_NAME) {{end}} - - name: JENKINS_OPTS - value: "{{ if .Values.master.jenkinsUriPrefix }}--prefix={{ .Values.master.jenkinsUriPrefix }} {{ end }}{{ default "" .Values.master.jenkinsOpts}}" - - name: JENKINS_SLAVE_AGENT_PORT - value: "{{ .Values.master.slaveListenerPort }}" - {{- if .Values.master.useSecurity }} - - name: ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "jenkins.fullname" . }} - key: jenkins-admin-password - - name: ADMIN_USER - valueFrom: - secretKeyRef: - name: {{ template "jenkins.fullname" . 
}} - key: jenkins-admin-user - {{- end }} - {{- if .Values.master.containerEnv }} -{{ toYaml .Values.master.containerEnv | indent 12 }} - {{- end }} - {{- if .Values.master.JCasC.enabled }} - - name: CASC_JENKINS_CONFIG - value: {{ .Values.master.sidecars.configAutoReload.folder | default "/var/jenkins_home/casc_configs" | quote }} - {{- end }} - ports: - - containerPort: 8080 - name: http - - containerPort: {{ .Values.master.slaveListenerPort }} - name: slavelistener - {{- if .Values.master.slaveHostPort }} - hostPort: {{ .Values.master.slaveHostPort }} - {{- end }} - {{- if .Values.master.jmxPort }} - - containerPort: {{ .Values.master.jmxPort }} - name: jmx - {{- end }} -{{- range $index, $port := .Values.master.extraPorts }} - - containerPort: {{ $port.port }} - name: {{ $port.name }} -{{- end }} -{{- if .Values.master.healthProbes }} - livenessProbe: - httpGet: - path: "{{ default "" .Values.master.jenkinsUriPrefix }}/login" - port: http - initialDelaySeconds: {{ .Values.master.healthProbeLivenessInitialDelay }} - periodSeconds: {{ .Values.master.healthProbeLivenessPeriodSeconds }} - timeoutSeconds: {{ .Values.master.healthProbesLivenessTimeout }} - failureThreshold: {{ .Values.master.healthProbeLivenessFailureThreshold }} - readinessProbe: - httpGet: - path: "{{ default "" .Values.master.jenkinsUriPrefix }}/login" - port: http - initialDelaySeconds: {{ .Values.master.healthProbeReadinessInitialDelay }} - periodSeconds: {{ .Values.master.healthProbeReadinessPeriodSeconds }} - timeoutSeconds: {{ .Values.master.healthProbesReadinessTimeout }} - failureThreshold: {{ .Values.master.healthProbeReadinessFailureThreshold }} -{{- end }} - - resources: -{{ toYaml .Values.master.resources | indent 12 }} - volumeMounts: -{{- if .Values.persistence.mounts }} -{{ toYaml .Values.persistence.mounts | indent 12 }} -{{- end }} - - mountPath: /tmp - name: tmp - - mountPath: /var/jenkins_home - name: jenkins-home - readOnly: false - {{- if .Values.persistence.subPath }} - subPath: {{ .Values.persistence.subPath }} - {{- end }} - - mountPath: /var/jenkins_config - name: jenkins-config - readOnly: true - {{- if .Values.master.enableXmlConfig }} - {{- if .Values.master.credentialsXmlSecret }} - - mountPath: /var/jenkins_credentials - name: jenkins-credentials - readOnly: true - {{- end }} - {{- if .Values.master.jobs }} - - mountPath: /var/jenkins_jobs - name: jenkins-jobs - readOnly: true - {{- end }} - - mountPath: /usr/share/jenkins/ref/secrets/ - name: secrets-dir - readOnly: false - {{- end }} - {{- if or .Values.master.secretsFilesSecret }} - - mountPath: /var/jenkins_secrets - name: jenkins-secrets - readOnly: true - {{- end }} - - mountPath: /usr/share/jenkins/ref/plugins/ - name: plugin-dir - readOnly: false - {{- if and (.Values.master.JCasC.enabled) (.Values.master.sidecars.configAutoReload.enabled) }} - - name: sc-config-volume - mountPath: {{ .Values.master.sidecars.configAutoReload.folder | default "/var/jenkins_home/casc_configs" | quote }} - {{- end }} - -{{- if and (.Values.master.JCasC.enabled) (.Values.master.sidecars.configAutoReload.enabled) }} - - name: jenkins-sc-config - image: "{{ .Values.master.sidecars.configAutoReload.image }}" - imagePullPolicy: {{ .Values.master.sidecars.configAutoReload.imagePullPolicy }} - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: LABEL - value: "{{ template "jenkins.fullname" . 
}}-jenkins-config" - - name: FOLDER - value: "{{ .Values.master.sidecars.configAutoReload.folder }}" - - name: NAMESPACE - value: "{{ .Values.master.sidecars.configAutoReload.searchNamespace | default .Release.Namespace }}" - - name: REQ_URL - value: "http://localhost:8080/reload-configuration-as-code/?casc-reload-token=$(POD_NAME)" - - name: REQ_METHOD - value: "POST" - resources: -{{ toYaml .Values.master.sidecars.configAutoReload.resources | indent 12 }} - volumeMounts: - - name: sc-config-volume - mountPath: {{ .Values.master.sidecars.configAutoReload.folder | quote }} - - name: jenkins-home - mountPath: /var/jenkins_home - {{- if .Values.persistence.subPath }} - subPath: {{ .Values.persistence.subPath }} - {{- end }} -{{- end}} - - -{{- if .Values.master.sidecars.other}} -{{ tpl (toYaml .Values.master.sidecars.other | indent 8) .}} -{{- end }} - - volumes: -{{- if .Values.persistence.volumes }} -{{ tpl (toYaml .Values.persistence.volumes | indent 6) . }} -{{- end }} - - name: plugins - emptyDir: {} - - name: tmp - emptyDir: {} - - name: jenkins-config - configMap: - name: {{ template "jenkins.fullname" . }} - {{- if .Values.master.enableXmlConfig }} - {{- if .Values.master.credentialsXmlSecret }} - - name: jenkins-credentials - secret: - secretName: {{ .Values.master.credentialsXmlSecret }} - {{- end }} - {{- if .Values.master.jobs }} - - name: jenkins-jobs - configMap: - name: {{ template "jenkins.fullname" . }}-jobs - {{- end }} - - name: secrets-dir - emptyDir: {} - {{- end }} - {{- if .Values.master.secretsFilesSecret }} - - name: jenkins-secrets - secret: - secretName: {{ .Values.master.secretsFilesSecret }} - {{- end }} - - name: plugin-dir - emptyDir: {} - - name: jenkins-home - {{- if .Values.persistence.enabled }} - persistentVolumeClaim: - claimName: {{ .Values.persistence.existingClaim | default (include "jenkins.fullname" .) 
}} - {{- else }} - emptyDir: {} - {{- end -}} - {{- if .Values.master.JCasC.enabled }} - - name: sc-config-volume - emptyDir: {} - {{- end }} -{{- if .Values.master.imagePullSecretName }} - imagePullSecrets: - - name: {{ .Values.master.imagePullSecretName }} -{{- end -}} diff --git a/Docker/jenkins/Jenkins2/jenkins.values b/Docker/jenkins/Jenkins2/jenkins.values deleted file mode 100644 index 404b59b49..000000000 --- a/Docker/jenkins/Jenkins2/jenkins.values +++ /dev/null @@ -1,39 +0,0 @@ -master: - # Used for label app.kubernetes.io/component - componentName: "k8s-jenkins-master-deployment" - serviceType: NodePort - NodePort: 32323 - adminUser: "admin" - # adminPassword: "" - - image: "quay.io/cdis/k8s-jenkins-master" - tag: "latest" - - installPlugins: false - - podLabels: - app: jenkins - - additionalEnv: - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: jenkins-secret - key: aws_access_key_id - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: jenkins-secret - key: aws_secret_access_key - - command: - - /sbin/tini - args: - - -- - - /opt/cdis/bin/jenkins2.sh - -rbac: - create: true -persistence: - size: "200Gi" - diff --git a/Docker/jenkins/Jenkins2/jenkins2.sh b/Docker/jenkins/Jenkins2/jenkins2.sh index c0fb0e4ea..fe4c53329 100644 --- a/Docker/jenkins/Jenkins2/jenkins2.sh +++ b/Docker/jenkins/Jenkins2/jenkins2.sh @@ -16,14 +16,25 @@ if [ -z "$JENKINS_S3_PATH" ]; then JENKINS_S3_PATH="s3://cdis-terraform-state/Jenkins2Backup" fi +# # Setup ~/.aws to support cloud-automation/gen3 +# terraform stuff wants a profile to query +# mkdir -p ~/.aws cat - > ~/.aws/config < ~/.aws/credentials < Date: Mon, 21 Nov 2022 10:52:06 -0800 Subject: [PATCH 22/29] Fix name of workflow (#2083) * Fix name of workflow --- .github/workflows/image_build_push_jenkins.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/image_build_push_jenkins.yaml b/.github/workflows/image_build_push_jenkins.yaml index d08ac737d..ffea50ace 100644 --- a/.github/workflows/image_build_push_jenkins.yaml +++ b/.github/workflows/image_build_push_jenkins.yaml @@ -1,4 +1,4 @@ -name: Build Python Base Images and Push to Quay and ECR +name: Build Jenkins images and push to Quay on: push: From a985b2a1afe0a25e2e5ab25ba279403837ae2229 Mon Sep 17 00:00:00 2001 From: "J. 
Q" <55899496+jawadqur@users.noreply.github.com> Date: Mon, 28 Nov 2022 08:40:42 -0600 Subject: [PATCH 23/29] Fix apiversion of cronjobs and fix cluster-autoscaler (#2082) --- gen3/bin/job.sh | 2 +- gen3/bin/kube-setup-networkpolicy.sh | 2 +- .../cluster-autoscaler-autodiscover.yaml | 14 ++++++++++++-- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/gen3/bin/job.sh b/gen3/bin/job.sh index 4a1c03542..09d305957 100644 --- a/gen3/bin/job.sh +++ b/gen3/bin/job.sh @@ -60,7 +60,7 @@ g3k_job2cronjson(){ local cronScript="$(cat - < Date: Tue, 29 Nov 2022 11:26:38 -0600 Subject: [PATCH 24/29] Add addtional label to opencost report (#2085) VADC-328 --- kube/services/jobs/opencost-report-argo-job.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kube/services/jobs/opencost-report-argo-job.yaml b/kube/services/jobs/opencost-report-argo-job.yaml index 9b8809cd7..0f31eca40 100644 --- a/kube/services/jobs/opencost-report-argo-job.yaml +++ b/kube/services/jobs/opencost-report-argo-job.yaml @@ -49,7 +49,7 @@ spec: proto-opencost-reporter GetAllocationReport \ --from_days_before 9 \ --to_days_before 1 \ - --aggregate_by label:gen3username \ + --aggregate_by label:gen3username label:workflows.argoproj.io/workflow \ --filter_namespaces argo \ --share_idle_by_node - restartPolicy: Never \ No newline at end of file + restartPolicy: Never From 89a983210031d5a4f4db2e318033c7f2702017e1 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 29 Nov 2022 11:53:37 -0700 Subject: [PATCH 25/29] Hiding the revproxy-service-elb behind a flag in the manifest.json (#2079) * Hiding the revproxy-service-elb behind a flag in the manifest.json * updating the documentation to explain the shift from ELB to ALB and difference in WAF defployments. Co-authored-by: J. Q <55899496+jawadqur@users.noreply.github.com> --- .secrets.baseline | 6 +++--- doc/kube-setup-ingress.md | 4 +++- doc/kube-setup-revproxy.md | 3 ++- gen3/bin/kube-setup-revproxy.sh | 15 ++++++++++++++- kube/services/revproxy/README.md | 2 ++ 5 files changed, 24 insertions(+), 6 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index e087e9243..8d7d9afb8 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -426,21 +426,21 @@ "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", "is_secret": false, "is_verified": false, - "line_number": 32, + "line_number": 38, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", "is_secret": false, "is_verified": false, - "line_number": 49, + "line_number": 55, "type": "Secret Keyword" }, { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", "is_secret": false, "is_verified": false, - "line_number": 51, + "line_number": 57, "type": "Secret Keyword" } ], diff --git a/doc/kube-setup-ingress.md b/doc/kube-setup-ingress.md index 15b2bd39e..bd4dff29c 100644 --- a/doc/kube-setup-ingress.md +++ b/doc/kube-setup-ingress.md @@ -2,7 +2,9 @@ Setup the aws-load-balancer-controller and an ALB. -This is a replacement for the revproxy-service-elb +This is a replacement for the revproxy-service-elb and WAF + +PLEASE NOTE: This script will now also deploy AWS WAF which will be associated with the ALB. This can be deployed by setting/adding the "waf_enabled" flag to true in the manifest-global configmap (set via the global section of the manifest.json). 
## Overview diff --git a/doc/kube-setup-revproxy.md b/doc/kube-setup-revproxy.md index 365d0b129..5c483e12f 100644 --- a/doc/kube-setup-revproxy.md +++ b/doc/kube-setup-revproxy.md @@ -5,6 +5,7 @@ Configure and launch the reverse proxy. ## References * the reverse proxy [readme](../kube/services/revproxy/README.md) has more details. -* WAF - the reverse proxy deploys the [modsecurity web application firewall](./waf.md). +* WAF - the reverse proxy deploys the [modsecurity web application firewall](./waf.md). (This is only deployed if the "deploy_elb" flag is set to true in the manifest-global configmap (set/added via the global section of the manifest.json).deploy the revproxy-ELB-service and WAF) +* Please see https://github.com/uc-cdis/cloud-automation/blob/master/doc/kube-setup-ingress.md as AWS WAF and ALB is recommended. * [maintenance mode](./maintenance.md) * the [ip blacklist](../gen3/lib/manifestDefaults/revproxy/) may be configured with a custom `manifests/revproxy/blacklist.conf` diff --git a/gen3/bin/kube-setup-revproxy.sh b/gen3/bin/kube-setup-revproxy.sh index 9e38fb908..0b6ee74d7 100644 --- a/gen3/bin/kube-setup-revproxy.sh +++ b/gen3/bin/kube-setup-revproxy.sh @@ -13,6 +13,12 @@ set -e source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/gen3setup" +gen3_load "gen3/lib/g3k_manifest" + +# Deploy ELB Service if flag set in manifest +manifestPath=$(g3k_manifest_path) +deployELB="$(jq -r ".[\"global\"][\"deploy_elb\"]" < "$manifestPath" | tr '[:upper:]' '[:lower:]')" + # # Setup indexd basic-auth gateway user creds enforced @@ -255,6 +261,9 @@ export ARN=$(g3kubectl get configmap global --output=jsonpath='{.data.revproxy_a # revproxy deployment using http proxy protocol. # # port 81 == proxy-protocol listener - main service entry + +gen3_deploy_revproxy_elb() { +gen3_log_info "Deploying revproxy-service-elb..." export TARGET_PORT_HTTPS=81 # port 82 == proxy-protocol listener - redirects to https export TARGET_PORT_HTTP=82 @@ -280,6 +289,10 @@ else envsubst <$scriptDir/revproxy-service-elb.yaml gen3_log_info "DRY RUN" fi - +} # Don't automatically apply this right now #kubectl apply -f $scriptDir/revproxy-service.yaml + +if [ "$deployELB" = true ]; then + gen3_deploy_revproxy_elb +fi diff --git a/kube/services/revproxy/README.md b/kube/services/revproxy/README.md index 4cec90df6..8940687d4 100644 --- a/kube/services/revproxy/README.md +++ b/kube/services/revproxy/README.md @@ -10,6 +10,8 @@ as an AWS ELB that terminates HTTPS requests (using an AWS Certificate Manager s forwards http and https traffic to the revproxy deployment using http proxy protocol. +Update: The revproxy-service-elb and WAF is now only applied if you set/add the "waf_enabled" flag to true in the manifest-global configmap (set via the global section of the manifest.json). We now recommend using the ALB Ingress via the kube-setup-ingress script detailed here: https://github.com/uc-cdis/cloud-automation/blob/master/doc/kube-setup-ingress.md + - Create a cert in AWS Certificate Manager, and register it in the global config map. 
This will require the admin for the domain approve it through email - `gen3 kube-setup-revproxy` - deploys the service - creating an AWS ELB - update DNS to point at the ELB From b24407c42a50a9c2b02851a8a98acb155353ed76 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Wed, 30 Nov 2022 11:37:43 -0800 Subject: [PATCH 26/29] use node 14 for CI (#2087) --- Docker/jenkins/Jenkins-CI-Worker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile index 08d047e52..afb1fca9f 100644 --- a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile @@ -72,7 +72,7 @@ RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ && chmod a+rx /usr/local/bin/docker-compose # install nodejs -RUN curl -sL https://deb.nodesource.com/setup_18.x | bash - +RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - RUN apt-get update && apt-get install -y nodejs # add psql: https://www.postgresql.org/download/linux/debian/ From 7b8072c446db41aed07d471abfed04ee0e3463db Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Wed, 30 Nov 2022 17:48:11 -0600 Subject: [PATCH 27/29] PXP-10268 Add 'fence-delete-expired-clients' job and cronjob (#2075) --- gen3/bin/kube-setup-fence.sh | 8 +++ .../fence-delete-expired-clients-job.yaml | 61 +++++++++++++++++++ 2 files changed, 69 insertions(+) create mode 100644 kube/services/jobs/fence-delete-expired-clients-job.yaml diff --git a/gen3/bin/kube-setup-fence.sh b/gen3/bin/kube-setup-fence.sh index 192000b8f..f69f80066 100644 --- a/gen3/bin/kube-setup-fence.sh +++ b/gen3/bin/kube-setup-fence.sh @@ -90,3 +90,11 @@ if isServiceVersionGreaterOrEqual "fence" "6.0.0" "2022.07"; then gen3 job cron fence-visa-update "30 * * * *" fi fi + +# add cronjob for removing expired OIDC clients for required fence versions +if isServiceVersionGreaterOrEqual "fence" "6.2.0" "2023.01"; then + if ! g3kubectl get cronjob fence-delete-expired-clients >/dev/null 2>&1; then + echo "fence-delete-expired-clients being added as a cronjob b/c fence >= 6.2.0 or 2023.01" + gen3 job cron fence-delete-expired-clients "0 7 * * *" + fi +fi diff --git a/kube/services/jobs/fence-delete-expired-clients-job.yaml b/kube/services/jobs/fence-delete-expired-clients-job.yaml new file mode 100644 index 000000000..bac613404 --- /dev/null +++ b/kube/services/jobs/fence-delete-expired-clients-job.yaml @@ -0,0 +1,61 @@ +# Delete all expired Fence OIDC clients and optionally post about expired clients on Slack. 
+# To set up as a daily cronjob: `gen3 job cron fence-delete-expired-clients "0 7 * * *"` +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: fence-delete-expired-clients +spec: + template: + metadata: + labels: + app: gen3job + spec: + serviceAccountName: useryaml-job + volumes: + - name: yaml-merge + configMap: + name: "fence-yaml-merge" + - name: config-volume + secret: + secretName: "fence-config" + containers: + - name: fence + GEN3_FENCE_IMAGE + imagePullPolicy: Always + env: + - name: PYTHONPATH + value: /var/www/fence + - name: FENCE_PUBLIC_CONFIG + valueFrom: + configMapKeyRef: + name: manifest-fence + key: fence-config-public.yaml + optional: true + - name: slackWebHook + valueFrom: + configMapKeyRef: + name: global + key: slack_webhook + volumeMounts: + - name: "config-volume" + readOnly: true + mountPath: "/var/www/fence/fence-config-secret.yaml" + subPath: fence-config.yaml + - name: "yaml-merge" + readOnly: true + mountPath: "/var/www/fence/yaml_merge.py" + subPath: yaml_merge.py + command: ["/bin/bash"] + args: + - "-c" + - | + echo "${FENCE_PUBLIC_CONFIG:-""}" > "/var/www/fence/fence-config-public.yaml" + python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config-secret.yaml > /var/www/fence/fence-config.yaml + if [[ "$slackWebHook" =~ ^http ]]; then + fence-create client-delete-expired --slack-webhook $slackWebHook --warning-days 7 + else + fence-create client-delete-expired + fi + exit $? + restartPolicy: Never From aeb0b85eb492409e42bb9e3d8dcc22e26f94b419 Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Mon, 5 Dec 2022 14:17:48 -0600 Subject: [PATCH 28/29] update datadog helm chart version (#2092) --- gen3/bin/kube-setup-datadog.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-datadog.sh b/gen3/bin/kube-setup-datadog.sh index 89f007b09..76019dff9 100644 --- a/gen3/bin/kube-setup-datadog.sh +++ b/gen3/bin/kube-setup-datadog.sh @@ -44,7 +44,7 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then fi helm repo add datadog https://helm.datadoghq.com --force-update 2> >(grep -v 'This is insecure' >&2) helm repo update 2> >(grep -v 'This is insecure' >&2) - helm upgrade --install datadog -f "$GEN3_HOME/kube/services/datadog/values.yaml" datadog/datadog -n datadog --version 2.33.8 2> >(grep -v 'This is insecure' >&2) + helm upgrade --install datadog -f "$GEN3_HOME/kube/services/datadog/values.yaml" datadog/datadog -n datadog --version 3.1.9 2> >(grep -v 'This is insecure' >&2) ) else gen3_log_info "kube-setup-datadog exiting - datadog already deployed, use --force to redeploy" From 54fe92826f489b13915552e517885ab1144f1902 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Mon, 5 Dec 2022 15:46:10 -0600 Subject: [PATCH 29/29] feat: add argo-wrapper, cohort-middleware and OHDSI tools to roll all (#2084) --- gen3/bin/kube-roll-all.sh | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh index 8b1abe88a..d93ac7600 100644 --- a/gen3/bin/kube-roll-all.sh +++ b/gen3/bin/kube-roll-all.sh @@ -314,6 +314,24 @@ else gen3_log_info "not deploying kayako-wrapper - no manifest entry for '.versions[\"kayako-wrapper\"]'" fi +if g3k_manifest_lookup '.versions["argo-wrapper"]' 2> /dev/null; then + gen3 kube-setup-argo-wrapper & +else + gen3_log_info "not deploying argo-wrapper - no manifest entry for '.versions[\"argo-wrapper\"]'" +fi + +if g3k_manifest_lookup '.versions["cohort-middleware"]' 2> /dev/null; then 
+ gen3 roll cohort-middleware & +else + gen3_log_info "not deploying cohort-middleware - no manifest entry for '.versions[\"cohort-middleware\"]'" +fi + +if g3k_manifest_lookup '.versions["ohdsi-atlas"]' && g3k_manifest_lookup '.versions["ohdsi-webapi"]' 2> /dev/null; then + gen3 kube-setup-ohdsi & +else + gen3_log_info "not deploying OHDSI tools - no manifest entry for '.versions[\"ohdsi-atlas\"]' and '.versions[\"ohdsi-webapi\"]'" +fi + gen3_log_info "enable network policy" gen3 kube-setup-networkpolicy "enable" || true &
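The new kube-roll-all.sh blocks in this patch key off `g3k_manifest_lookup`, so each service is only rolled when its image is pinned under `versions` in the commons manifest, and the OHDSI tools are only rolled when both `ohdsi-atlas` and `ohdsi-webapi` are present. A rough sketch of the corresponding manifest entries and a quick check; the image names and tags shown are placeholders, not taken from this change:

```bash
# Placeholder entries only; real image references depend on the commons.
# For the new conditionals to fire, manifest.json needs entries such as:
#
#   "versions": {
#     "argo-wrapper":      "quay.io/cdis/argo-wrapper:<tag>",
#     "cohort-middleware": "quay.io/cdis/cohort-middleware:<tag>",
#     "ohdsi-atlas":       "quay.io/cdis/ohdsi-atlas:<tag>",
#     "ohdsi-webapi":      "quay.io/cdis/ohdsi-webapi:<tag>"
#   }
#
# Quick check, mirroring the pattern above (assumes the gen3 helpers are loaded,
# e.g. source "$GEN3_HOME/gen3/lib/utils.sh" && gen3_load "gen3/lib/g3k_manifest").
if g3k_manifest_lookup '.versions["cohort-middleware"]' 2> /dev/null; then
  echo "cohort-middleware is pinned in the manifest; kube-roll-all will roll it"
else
  echo "cohort-middleware is not in the manifest; kube-roll-all will skip it"
fi
```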